Commit 78971825 authored by osku

Remove "ibool comp" from dict_table_t and replace it with "ulint flags"

which can contain the new flag DICT_TF_COMPACT. Change dict_mem_table_create
to take a flags argument. Add dict_table_is_comp(). Adapt all users.

Change some places to explicitly assume that system tables do not use the
compact page format.
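As a rough illustration of the new scheme — a minimal, standalone C sketch of the flags word and the dict_table_is_comp() accessor added in the header hunks below; the struct here is reduced to the one member that matters and is not the real dict_table_t declaration:

```c
#include <assert.h>

#define DICT_TF_COMPACT	1	/* compact page format; value confirmed by the hunks below */

/* Simplified stand-in for dict_table_t: only the new flags word. */
typedef struct {
	unsigned long	flags;	/* replaces the old "ibool comp" member */
} dict_table_t;

/* Mirrors the new accessor: nonzero iff the table uses the compact format. */
static int
dict_table_is_comp(const dict_table_t* table)
{
	return (table->flags & DICT_TF_COMPACT) != 0;
}

int
main(void)
{
	dict_table_t	redundant = {0};		/* old-style format, e.g. system tables */
	dict_table_t	compact = {DICT_TF_COMPACT};	/* ROW_FORMAT=COMPACT */

	assert(!dict_table_is_comp(&redundant));
	assert(dict_table_is_comp(&compact));
	return 0;
}
```

Callers that used to read table->comp directly now go through dict_table_is_comp(table), and dict_mem_table_create() takes the flags word (0 or DICT_TF_COMPACT) in place of the old ibool argument, as the hunks below show.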
parent 8505ff17
......@@ -144,7 +144,7 @@ btr_root_get(
root = btr_page_get(space, root_page_no, RW_X_LATCH, mtr);
ut_a((ibool)!!page_is_comp(root) ==
UT_LIST_GET_FIRST(tree->tree_indexes)->table->comp);
dict_table_is_comp(UT_LIST_GET_FIRST(tree->tree_indexes)->table));
return(root);
}
......@@ -259,7 +259,7 @@ btr_page_create(
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
MTR_MEMO_PAGE_X_FIX));
page_create(page, mtr,
UT_LIST_GET_FIRST(tree->tree_indexes)->table->comp);
dict_table_is_comp(UT_LIST_GET_FIRST(tree->tree_indexes)->table));
buf_block_align(page)->check_index_page_at_flush = TRUE;
btr_page_set_index_id(page, tree->id, mtr);
......@@ -846,7 +846,7 @@ btr_page_reorganize_low(
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
MTR_MEMO_PAGE_X_FIX));
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
data_size1 = page_get_data_size(page);
max_ins_size1 = page_get_max_insert_size_after_reorganize(page, 1);
......@@ -2011,7 +2011,7 @@ btr_compress(
page = btr_cur_get_page(cursor);
tree = btr_cur_get_tree(cursor);
comp = page_is_comp(page);
ut_a((ibool)!!comp == cursor->index->table->comp);
ut_a((ibool)!!comp == dict_table_is_comp(cursor->index->table));
ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
MTR_MEMO_X_LOCK));
......@@ -2507,11 +2507,13 @@ btr_index_rec_validate(
return(TRUE);
}
if (UNIV_UNLIKELY((ibool)!!page_is_comp(page) != index->table->comp)) {
if (UNIV_UNLIKELY((ibool)!!page_is_comp(page)
!= dict_table_is_comp(index->table))) {
btr_index_rec_validate_report(page, rec, index);
fprintf(stderr, "InnoDB: compact flag=%lu, should be %lu\n",
(ulong) !!page_is_comp(page),
(ulong) index->table->comp);
(ulong) dict_table_is_comp(index->table));
return(FALSE);
}
......
......@@ -513,7 +513,7 @@ btr_cur_search_to_nth_level(
page = btr_page_get(space,
page_no, RW_X_LATCH, mtr);
ut_a((ibool)!!page_is_comp(page)
== index->table->comp);
== dict_table_is_comp(index->table));
}
break;
......@@ -1304,7 +1304,7 @@ btr_cur_update_in_place_log(
byte* log_ptr;
page_t* page = ut_align_down(rec, UNIV_PAGE_SIZE);
ut_ad(flags < 256);
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
log_ptr = mlog_open_and_write_index(mtr, rec, index, page_is_comp(page)
? MLOG_COMP_REC_UPDATE_IN_PLACE
......@@ -1390,7 +1390,7 @@ btr_cur_parse_update_in_place(
goto func_exit;
}
ut_a((ibool)!!page_is_comp(page) == index->table->comp);
ut_a((ibool)!!page_is_comp(page) == dict_table_is_comp(index->table));
rec = page + rec_offset;
/* We do not need to reserve btr_search_latch, as the page is only
......@@ -1443,7 +1443,7 @@ btr_cur_update_in_place(
rec = btr_cur_get_rec(cursor);
index = cursor->index;
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
trx = thr_get_trx(thr);
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
#ifdef UNIV_DEBUG
......@@ -1466,7 +1466,7 @@ btr_cur_update_in_place(
block = buf_block_align(rec);
ut_ad(!!page_is_comp(buf_block_get_frame(block))
== index->table->comp);
== dict_table_is_comp(index->table));
if (block->is_hashed) {
/* The function row_upd_changes_ord_field_binary works only
......@@ -1558,7 +1558,7 @@ btr_cur_optimistic_update(
page = btr_cur_get_page(cursor);
rec = btr_cur_get_rec(cursor);
index = cursor->index;
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
heap = mem_heap_create(1024);
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
......@@ -2009,7 +2009,7 @@ btr_cur_del_mark_set_clust_rec_log(
ut_ad(flags < 256);
ut_ad(val <= 1);
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
log_ptr = mlog_open_and_write_index(mtr, rec, index,
page_rec_is_comp(rec)
......@@ -2056,7 +2056,8 @@ btr_cur_parse_del_mark_set_clust_rec(
ulint offset;
rec_t* rec;
ut_ad(!page || !!page_is_comp(page) == index->table->comp);
ut_ad(!page
|| !!page_is_comp(page) == dict_table_is_comp(index->table));
if (end_ptr < ptr + 2) {
......@@ -2142,7 +2143,7 @@ btr_cur_del_mark_set_clust_rec(
rec = btr_cur_get_rec(cursor);
index = cursor->index;
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
#ifdef UNIV_DEBUG
......@@ -2315,7 +2316,7 @@ btr_cur_del_mark_set_sec_rec(
block = buf_block_align(rec);
ut_ad(!!page_is_comp(buf_block_get_frame(block))
== cursor->index->table->comp);
== dict_table_is_comp(cursor->index->table));
if (block->is_hashed) {
rw_lock_x_lock(&btr_search_latch);
......@@ -3640,7 +3641,7 @@ btr_rec_free_externally_stored_fields(
MTR_MEMO_PAGE_X_FIX));
/* Free possible externally stored fields in the record */
ut_ad(index->table->comp == !!rec_offs_comp(offsets));
ut_ad(dict_table_is_comp(index->table) == !!rec_offs_comp(offsets));
n_fields = rec_offs_n_fields(offsets);
for (i = 0; i < n_fields; i++) {
......
......@@ -525,9 +525,9 @@ dtuple_convert_big_rec(
n_fields = 0;
while (rec_get_converted_size(index, entry)
>= ut_min(page_get_free_space_of_empty(
index->table->comp) / 2,
REC_MAX_DATA_SIZE)) {
>= ut_min(page_get_free_space_of_empty(
dict_table_is_comp(index->table)) / 2,
REC_MAX_DATA_SIZE)) {
longest = 0;
for (i = dict_index_get_n_unique_in_tree(index);
......
......@@ -245,7 +245,7 @@ dict_boot(void)
/* Insert into the dictionary cache the descriptions of the basic
system tables */
/*-------------------------*/
table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, FALSE);
table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0);
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0);
......@@ -281,7 +281,7 @@ dict_boot(void)
dict_hdr + DICT_HDR_TABLE_IDS, MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, FALSE);
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0);
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY,0,0,0);
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0);
......@@ -307,7 +307,7 @@ dict_boot(void)
dict_hdr + DICT_HDR_COLUMNS, MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, FALSE);
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0);
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0,0,0);
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0);
......@@ -343,7 +343,7 @@ dict_boot(void)
dict_hdr + DICT_HDR_INDEXES, MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, FALSE);
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0);
dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0,0,0);
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0);
......
......@@ -62,9 +62,13 @@ dict_create_sys_tables_tuple(
/* 4: N_COLS ---------------------------*/
dfield = dtuple_get_nth_field(entry, 2);
#if DICT_TF_COMPACT != 1
#error
#endif
ptr = mem_heap_alloc(heap, 4);
mach_write_to_4(ptr, table->n_def
| ((ulint) table->comp << 31));
| ((table->flags & DICT_TF_COMPACT) << 31));
dfield_set_data(dfield, ptr, 4);
/* 5: TYPE -----------------------------*/
dfield = dtuple_get_nth_field(entry, 3);
......@@ -634,7 +638,7 @@ dict_create_index_tree_step(
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
node->page_no = btr_create(index->type, index->space, index->id,
table->comp, &mtr);
dict_table_is_comp(table), &mtr);
/* printf("Created a new index tree in space %lu root page %lu\n",
index->space, index->page_no); */
......@@ -671,7 +675,7 @@ dict_drop_index_tree(
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(!dict_sys->sys_indexes->comp);
ut_a(!dict_table_is_comp(dict_sys->sys_indexes));
ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len);
ut_ad(len == 4);
......@@ -743,7 +747,7 @@ dict_truncate_index_tree(
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
ut_a(!dict_sys->sys_indexes->comp);
ut_a(!dict_table_is_comp(dict_sys->sys_indexes));
ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len);
ut_ad(len == 4);
......
......@@ -3691,7 +3691,7 @@ dict_tree_find_index_low(
&& UNIV_UNLIKELY(table->type != DICT_TABLE_ORDINARY)) {
/* Get the mix id of the record */
ut_a(!table->comp);
ut_a(!dict_table_is_comp(table));
mix_id = mach_dulint_read_compressed(
rec_get_nth_field_old(rec, table->mix_len, &len));
......@@ -3787,7 +3787,7 @@ dict_is_mixed_table_rec(
byte* mix_id_field;
ulint len;
ut_ad(!table->comp);
ut_ad(!dict_table_is_comp(table));
mix_id_field = rec_get_nth_field_old(rec,
table->mix_len, &len);
......@@ -3850,7 +3850,7 @@ dict_tree_build_node_ptr(
on non-leaf levels we remove the last field, which
contains the page number of the child page */
ut_a(!ind->table->comp);
ut_a(!dict_table_is_comp(ind->table));
n_unique = rec_get_n_fields_old(rec);
if (level > 0) {
......@@ -3913,7 +3913,7 @@ dict_tree_copy_rec_order_prefix(
index = dict_tree_find_index_low(tree, rec);
if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) {
ut_a(!index->table->comp);
ut_a(!dict_table_is_comp(index->table));
n = rec_get_n_fields_old(rec);
} else {
n = dict_index_get_n_unique_in_tree(index);
......@@ -3940,7 +3940,8 @@ dict_tree_build_data_tuple(
ind = dict_tree_find_index_low(tree, rec);
ut_ad(ind->table->comp || n_fields <= rec_get_n_fields_old(rec));
ut_ad(dict_table_is_comp(ind->table)
|| n_fields <= rec_get_n_fields_old(rec));
tuple = dtuple_create(heap, n_fields);
......@@ -3964,7 +3965,7 @@ dict_index_calc_min_rec_len(
ulint sum = 0;
ulint i;
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
ulint nullable = 0;
sum = REC_N_NEW_EXTRA_BYTES;
for (i = 0; i < dict_index_get_n_fields(index); i++) {
......
......@@ -58,7 +58,7 @@ dict_get_first_table_name_in_db(
sys_tables = dict_table_get_low("SYS_TABLES");
sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
ut_a(!sys_tables->comp);
ut_a(!dict_table_is_comp(sys_tables));
tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -94,7 +94,7 @@ dict_get_first_table_name_in_db(
return(NULL);
}
if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
if (!rec_get_deleted_flag(rec, 0)) {
/* We found one */
......@@ -169,7 +169,7 @@ dict_print(void)
field = rec_get_nth_field_old(rec, 0, &len);
if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
if (!rec_get_deleted_flag(rec, 0)) {
/* We found one */
......@@ -235,7 +235,7 @@ dict_check_tablespaces_and_store_max_id(
sys_tables = dict_table_get_low("SYS_TABLES");
sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
ut_a(!sys_tables->comp);
ut_a(!dict_table_is_comp(sys_tables));
btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
TRUE, &mtr);
......@@ -264,7 +264,7 @@ dict_check_tablespaces_and_store_max_id(
field = rec_get_nth_field_old(rec, 0, &len);
if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
if (!rec_get_deleted_flag(rec, 0)) {
/* We found one */
......@@ -343,7 +343,7 @@ dict_load_columns(
sys_columns = dict_table_get_low("SYS_COLUMNS");
sys_index = UT_LIST_GET_FIRST(sys_columns->indexes);
ut_a(!sys_columns->comp);
ut_a(!dict_table_is_comp(sys_columns));
tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -362,7 +362,7 @@ dict_load_columns(
ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
ut_a(!rec_get_deleted_flag(rec, sys_columns->comp));
ut_a(!rec_get_deleted_flag(rec, 0));
field = rec_get_nth_field_old(rec, 0, &len);
ut_ad(len == 8);
......@@ -476,7 +476,7 @@ dict_load_fields(
sys_fields = dict_table_get_low("SYS_FIELDS");
sys_index = UT_LIST_GET_FIRST(sys_fields->indexes);
ut_a(!sys_fields->comp);
ut_a(!dict_table_is_comp(sys_fields));
tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -494,7 +494,7 @@ dict_load_fields(
rec = btr_pcur_get_rec(&pcur);
ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
if (rec_get_deleted_flag(rec, sys_fields->comp)) {
if (rec_get_deleted_flag(rec, 0)) {
dict_load_report_deleted_index(table->name, i);
}
......@@ -589,7 +589,7 @@ dict_load_indexes(
sys_indexes = dict_table_get_low("SYS_INDEXES");
sys_index = UT_LIST_GET_FIRST(sys_indexes->indexes);
ut_a(!sys_indexes->comp);
ut_a(!dict_table_is_comp(sys_indexes));
tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -617,7 +617,7 @@ dict_load_indexes(
break;
}
if (rec_get_deleted_flag(rec, table->comp)) {
if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
dict_load_report_deleted_index(table->name,
ULINT_UNDEFINED);
......@@ -739,6 +739,7 @@ dict_load_table(
ulint len;
ulint space;
ulint n_cols;
ulint flags;
ulint err;
mtr_t mtr;
......@@ -752,7 +753,7 @@ dict_load_table(
sys_tables = dict_table_get_low("SYS_TABLES");
sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
ut_a(!sys_tables->comp);
ut_a(!dict_table_is_comp(sys_tables));
tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -765,7 +766,7 @@ dict_load_table(
rec = btr_pcur_get_rec(&pcur);
if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, sys_tables->comp)) {
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */
btr_pcur_close(&pcur);
......@@ -827,10 +828,15 @@ dict_load_table(
field = rec_get_nth_field_old(rec, 4, &len);
n_cols = mach_read_from_4(field);
flags = 0;
/* The high-order bit of N_COLS is the "compact format" flag. */
table = dict_mem_table_create(name, space,
n_cols & ~0x80000000UL,
!!(n_cols & 0x80000000UL));
if (n_cols & 0x80000000UL) {
flags |= DICT_TF_COMPACT;
}
table = dict_mem_table_create(name, space, n_cols & ~0x80000000UL,
flags);
table->ibd_file_missing = ibd_file_missing;
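Aside on the persisted form, pieced together from the dict_create_sys_tables_tuple hunk earlier and the dict_load_table hunk just above (a hedged standalone sketch, not the InnoDB code): the compact flag travels in the high-order bit of SYS_TABLES.N_COLS, with the column count kept in the low 31 bits.

```c
#include <assert.h>

#define DICT_TF_COMPACT	1UL

/* Encode as in dict_create_sys_tables_tuple: column count plus the
   compact flag shifted into bit 31 of N_COLS. */
static unsigned long
n_cols_encode(unsigned long n_cols, unsigned long flags)
{
	return n_cols | ((flags & DICT_TF_COMPACT) << 31);
}

int
main(void)
{
	unsigned long	n_cols = n_cols_encode(4, DICT_TF_COMPACT);
	unsigned long	flags = 0;

	/* Decode as in dict_load_table. */
	if (n_cols & 0x80000000UL) {
		flags |= DICT_TF_COMPACT;
	}
	assert((n_cols & ~0x80000000UL) == 4);
	assert(flags == DICT_TF_COMPACT);
	return 0;
}
```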
......@@ -938,7 +944,7 @@ dict_load_table_on_id(
sys_tables = dict_sys->sys_tables;
sys_table_ids = dict_table_get_next_index(
dict_table_get_first_index(sys_tables));
ut_a(!sys_tables->comp);
ut_a(!dict_table_is_comp(sys_tables));
heap = mem_heap_create(256);
tuple = dtuple_create(heap, 1);
......@@ -955,7 +961,7 @@ dict_load_table_on_id(
rec = btr_pcur_get_rec(&pcur);
if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, sys_tables->comp)) {
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */
btr_pcur_close(&pcur);
......@@ -1052,7 +1058,7 @@ dict_load_foreign_cols(
sys_foreign_cols = dict_table_get_low("SYS_FOREIGN_COLS");
sys_index = UT_LIST_GET_FIRST(sys_foreign_cols->indexes);
ut_a(!sys_foreign_cols->comp);
ut_a(!dict_table_is_comp(sys_foreign_cols));
tuple = dtuple_create(foreign->heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -1067,7 +1073,7 @@ dict_load_foreign_cols(
rec = btr_pcur_get_rec(&pcur);
ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
ut_a(!rec_get_deleted_flag(rec, sys_foreign_cols->comp));
ut_a(!rec_get_deleted_flag(rec, 0));
field = rec_get_nth_field_old(rec, 0, &len);
ut_a(len == ut_strlen(id));
......@@ -1125,7 +1131,7 @@ dict_load_foreign(
sys_foreign = dict_table_get_low("SYS_FOREIGN");
sys_index = UT_LIST_GET_FIRST(sys_foreign->indexes);
ut_a(!sys_foreign->comp);
ut_a(!dict_table_is_comp(sys_foreign));
tuple = dtuple_create(heap2, 1);
dfield = dtuple_get_nth_field(tuple, 0);
......@@ -1138,7 +1144,7 @@ dict_load_foreign(
rec = btr_pcur_get_rec(&pcur);
if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, sys_foreign->comp)) {
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */
fprintf(stderr,
......@@ -1260,7 +1266,7 @@ dict_load_foreigns(
return(DB_ERROR);
}
ut_a(!sys_foreign->comp);
ut_a(!dict_table_is_comp(sys_foreign));
mtr_start(&mtr);
/* Get the secondary index based on FOR_NAME from table
......@@ -1315,7 +1321,7 @@ dict_load_foreigns(
goto next_rec;
}
if (rec_get_deleted_flag(rec, sys_foreign->comp)) {
if (rec_get_deleted_flag(rec, 0)) {
goto next_rec;
}
......
......@@ -36,13 +36,13 @@ dict_mem_table_create(
ignored if the table is made a member of
a cluster */
ulint n_cols, /* in: number of columns */
ibool comp) /* in: TRUE=compact page format */
ulint flags) /* in: table flags */
{
dict_table_t* table;
mem_heap_t* heap;
ut_ad(name);
ut_ad(comp == FALSE || comp == TRUE);
ut_ad(!(flags & ~DICT_TF_COMPACT));
heap = mem_heap_create(DICT_HEAP_SIZE);
......@@ -51,12 +51,12 @@ dict_mem_table_create(
table->heap = heap;
table->type = DICT_TABLE_ORDINARY;
table->flags = flags;
table->name = mem_heap_strdup(heap, name);
table->dir_path_of_temp_table = NULL;
table->space = space;
table->ibd_file_missing = FALSE;
table->tablespace_discarded = FALSE;
table->comp = comp;
table->n_def = 0;
table->n_cols = n_cols + DATA_N_SYS_COLS;
table->mem_fix = 0;
......@@ -114,7 +114,7 @@ dict_mem_cluster_create(
dict_table_t* cluster;
/* Clustered tables cannot work with the compact record format. */
cluster = dict_mem_table_create(name, space, n_cols, FALSE);
cluster = dict_mem_table_create(name, space, n_cols, 0);
cluster->type = DICT_TABLE_CLUSTER;
cluster->mix_len = mix_len;
......
......@@ -2306,7 +2306,7 @@ ha_innobase::get_row_type() const
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
if (prebuilt && prebuilt->table) {
if (prebuilt->table->comp) {
if (dict_table_is_comp(prebuilt->table)) {
return(ROW_TYPE_COMPACT);
} else {
return(ROW_TYPE_REDUNDANT);
......@@ -3694,12 +3694,12 @@ calc_row_difference(
if (n_len != UNIV_SQL_NULL) {
buf = row_mysql_store_col_in_innobase_format(
&dfield,
(byte*)buf,
TRUE,
new_mysql_row_col,
col_pack_len,
prebuilt->table->comp);
&dfield,
(byte*)buf,
TRUE,
new_mysql_row_col,
col_pack_len,
dict_table_is_comp(prebuilt->table));
ufield->new_val.data = dfield.data;
ufield->new_val.len = dfield.len;
} else {
......@@ -4583,7 +4583,7 @@ create_table_def(
an .ibd file for it (no .ibd extension
in the path, though); otherwise this
is NULL */
ibool comp) /* in: TRUE=compact record format */
ulint flags) /* in: table flags */
{
Field* field;
dict_table_t* table;
......@@ -4606,7 +4606,7 @@ create_table_def(
/* We pass 0 as the space id, and determine at a lower level the space
id where to store the table */
table = dict_mem_table_create(table_name, 0, n_cols, comp);
table = dict_mem_table_create(table_name, 0, n_cols, flags);
if (path_of_temp_table) {
table->dir_path_of_temp_table =
......@@ -4852,6 +4852,7 @@ ha_innobase::create(
char norm_name[FN_REFLEN];
THD *thd= current_thd;
ib_longlong auto_inc_value;
ulint flags;
DBUG_ENTER("ha_innobase::create");
......@@ -4905,9 +4906,15 @@ ha_innobase::create(
/* Create the table definition in InnoDB */
flags = 0;
if (form->s->row_type != ROW_TYPE_REDUNDANT) {
flags |= DICT_TF_COMPACT;
}
error = create_table_def(trx, form, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
form->s->row_type != ROW_TYPE_REDUNDANT);
flags);
if (error) {
goto cleanup;
......
......@@ -532,7 +532,7 @@ ibuf_data_init_for_space(
sprintf(buf, "SYS_IBUF_TABLE_%lu", (ulong) space);
/* use old-style record format for the insert buffer */
table = dict_mem_table_create(buf, space, 2, FALSE);
table = dict_mem_table_create(buf, space, 2, 0);
dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0, 0);
dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0, 0);
......@@ -1127,13 +1127,18 @@ ibuf_dummy_index_create(
{
dict_table_t* table;
dict_index_t* index;
table = dict_mem_table_create("IBUF_DUMMY",
DICT_HDR_SPACE, n, comp);
DICT_HDR_SPACE, n, comp ? DICT_TF_COMPACT : 0);
index = dict_mem_index_create("IBUF_DUMMY", "IBUF_DUMMY",
DICT_HDR_SPACE, 0, n);
DICT_HDR_SPACE, 0, n);
index->table = table;
/* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
index->cached = TRUE;
return(index);
}
/************************************************************************
......@@ -2454,7 +2459,7 @@ ibuf_update_max_tablespace_id(void)
ibuf_data = fil_space_get_ibuf_data(0);
ibuf_index = ibuf_data->index;
ut_a(!ibuf_index->table->comp);
ut_a(!dict_table_is_comp(ibuf_index->table));
ibuf_enter();
......@@ -2598,8 +2603,8 @@ ibuf_insert_low(
the first fields and the type information for other fields, and which
will be inserted to the insert buffer. */
ibuf_entry = ibuf_entry_build(entry, index->table->comp,
space, page_no, heap);
ibuf_entry = ibuf_entry_build(entry, dict_table_is_comp(index->table),
space, page_no, heap);
/* Open a cursor to the insert buffer tree to calculate if we can add
the new entry to it without exceeding the free space limit for the
......@@ -2769,7 +2774,8 @@ ibuf_insert(
ut_a(!(index->type & DICT_CLUSTERED));
if (rec_get_converted_size(index, entry)
>= page_get_free_space_of_empty(index->table->comp) / 2) {
>= page_get_free_space_of_empty(
dict_table_is_comp(index->table)) / 2) {
return(FALSE);
}
......@@ -2816,7 +2822,8 @@ ibuf_insert_to_index_page(
ut_ad(ibuf_inside());
ut_ad(dtuple_check_typed(entry));
if (UNIV_UNLIKELY(index->table->comp != (ibool)!!page_is_comp(page))) {
if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
!= (ibool)!!page_is_comp(page))) {
fputs(
"InnoDB: Trying to insert a record from the insert buffer to an index page\n"
"InnoDB: but the 'compact' flag does not match!\n", stderr);
......
......@@ -53,7 +53,8 @@ btr_cur_get_page(
btr_cur_t* cursor) /* in: tree cursor */
{
page_t* page = buf_frame_align(page_cur_get_rec(&(cursor->page_cur)));
ut_ad(!!page_is_comp(page) == cursor->index->table->comp);
ut_ad(!!page_is_comp(page)
== dict_table_is_comp(cursor->index->table));
return(page);
}
......
......@@ -487,6 +487,15 @@ dict_table_get_sys_col_no(
dict_table_t* table, /* in: table */
ulint sys); /* in: DATA_ROW_ID, ... */
/************************************************************************
Check whether the table uses the compact page format. */
UNIV_INLINE
ibool
dict_table_is_comp(
/*===============*/
/* out: TRUE if table uses the
compact page format */
const dict_table_t* table); /* in: table */
/************************************************************************
Checks if a column is in the ordering columns of the clustered index of a
table. Column prefixes are treated like whole columns. */
......
......@@ -189,6 +189,25 @@ dict_table_get_sys_col_no(
return(table->n_cols - DATA_N_SYS_COLS + sys);
}
/************************************************************************
Check whether the table uses the compact page format. */
UNIV_INLINE
ibool
dict_table_is_comp(
/*===============*/
/* out: TRUE if table uses the
compact page format */
const dict_table_t* table) /* in: table */
{
ut_ad(table);
#if DICT_TF_COMPACT != TRUE
#error
#endif
return(UNIV_LIKELY(table->flags & DICT_TF_COMPACT));
}
/************************************************************************
Gets the number of fields in the internal representation of an index,
including fields added by the dictionary system. */
......
......@@ -39,6 +39,9 @@ combination of types */
#define DICT_TABLE_CLUSTER 3 /* this means that the table is
really a cluster definition */
/* Table flags */
#define DICT_TF_COMPACT 1 /* compact page format */
/**************************************************************************
Creates a table memory object. */
......@@ -52,7 +55,7 @@ dict_mem_table_create(
is ignored if the table is made
a member of a cluster */
ulint n_cols, /* in: number of columns */
ibool comp); /* in: TRUE=compact page format */
ulint flags); /* in: table flags */
/**************************************************************************
Creates a cluster memory object. */
......@@ -300,6 +303,7 @@ a foreign key constraint is enforced, therefore RESTRICT just means no flag */
struct dict_table_struct{
dulint id; /* id of the table or cluster */
ulint type; /* DICT_TABLE_ORDINARY, ... */
ulint flags; /* DICT_TF_COMPACT, ... */
mem_heap_t* heap; /* memory heap */
const char* name; /* table name */
const char* dir_path_of_temp_table;/* NULL or the directory path
......@@ -317,7 +321,6 @@ struct dict_table_struct{
ibool tablespace_discarded;/* this flag is set TRUE when the
user calls DISCARD TABLESPACE on this table,
and reset to FALSE in IMPORT TABLESPACE */
ibool comp; /* flag: TRUE=compact page format */
hash_node_t name_hash; /* hash chain node */
hash_node_t id_hash; /* hash chain node */
ulint n_def; /* number of columns defined so far */
......
......@@ -421,9 +421,11 @@ rec_get_n_fields(
{
ut_ad(rec);
ut_ad(index);
if (UNIV_UNLIKELY(!index->table->comp)) {
if (!dict_table_is_comp(index->table)) {
return(rec_get_n_fields_old(rec));
}
switch (rec_get_status(rec)) {
case REC_STATUS_ORDINARY:
return(dict_index_get_n_fields(index));
......@@ -1026,7 +1028,7 @@ rec_set_nth_field_extern_bit(
where rec is, or NULL; in the NULL case
we do not write to log about the change */
{
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
rec_set_nth_field_extern_bit_new(rec, index, i, val, mtr);
} else {
rec_set_nth_field_extern_bit_old(rec, i, val, mtr);
......@@ -1441,7 +1443,7 @@ rec_get_converted_size(
? dict_index_get_n_unique_in_tree(index) + 1
: dict_index_get_n_fields(index)));
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
return(rec_get_converted_size_new(index, dtuple));
}
......
......@@ -1731,7 +1731,7 @@ lock_rec_create(
page_no = buf_frame_get_page_no(page);
heap_no = rec_get_heap_no(rec, page_is_comp(page));
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
/* If rec is the supremum record, then we reset the gap and
LOCK_REC_NOT_GAP bits, as all locks on the supremum are
......
......@@ -769,7 +769,8 @@ recv_parse_or_apply_log_rec_body(
if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
type == MLOG_COMP_REC_INSERT, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = page_cur_parse_insert_rec(FALSE, ptr, end_ptr,
index, page, mtr);
}
......@@ -778,7 +779,8 @@ recv_parse_or_apply_log_rec_body(
if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
type == MLOG_COMP_REC_CLUST_DELETE_MARK, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = btr_cur_parse_del_mark_set_clust_rec(ptr,
end_ptr, index, page);
}
......@@ -799,7 +801,8 @@ recv_parse_or_apply_log_rec_body(
if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
type == MLOG_COMP_REC_UPDATE_IN_PLACE, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = btr_cur_parse_update_in_place(ptr, end_ptr,
page, index);
}
......@@ -810,7 +813,8 @@ recv_parse_or_apply_log_rec_body(
type == MLOG_COMP_LIST_END_DELETE
|| type == MLOG_COMP_LIST_START_DELETE, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = page_parse_delete_rec_list(type, ptr, end_ptr,
index, page, mtr);
}
......@@ -819,7 +823,8 @@ recv_parse_or_apply_log_rec_body(
if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
type == MLOG_COMP_LIST_END_COPY_CREATED, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = page_parse_copy_rec_list_to_created_page(ptr,
end_ptr, index, page, mtr);
}
......@@ -828,7 +833,8 @@ recv_parse_or_apply_log_rec_body(
if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
type == MLOG_COMP_PAGE_REORGANIZE, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = btr_parse_page_reorganize(ptr, end_ptr, index,
page, mtr);
}
......@@ -862,7 +868,8 @@ recv_parse_or_apply_log_rec_body(
if (NULL != (ptr = mlog_parse_index(ptr, end_ptr,
type == MLOG_COMP_REC_DELETE, &index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)==index->table->comp);
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
ptr = page_cur_parse_delete_rec(ptr, end_ptr,
index, page, mtr);
}
......
......@@ -407,7 +407,7 @@ mlog_open_and_write_index(
const byte* log_start;
const byte* log_end;
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
if (!page_rec_is_comp(rec)) {
log_start = log_ptr = mlog_open(mtr, 11 + size);
......@@ -518,7 +518,8 @@ mlog_parse_index(
} else {
n = n_uniq = 1;
}
table = dict_mem_table_create("LOG_DUMMY", DICT_HDR_SPACE, n, comp);
table = dict_mem_table_create("LOG_DUMMY", DICT_HDR_SPACE, n,
comp ? DICT_TF_COMPACT : 0);
ind = dict_mem_index_create("LOG_DUMMY", "LOG_DUMMY",
DICT_HDR_SPACE, 0, n);
ind->table = table;
......
......@@ -532,7 +532,8 @@ page_cur_insert_rec_write_log(
ut_a(rec_size < UNIV_PAGE_SIZE);
ut_ad(buf_frame_align(insert_rec) == buf_frame_align(cursor_rec));
ut_ad(!page_rec_is_comp(insert_rec) == !index->table->comp);
ut_ad(!page_rec_is_comp(insert_rec)
== !dict_table_is_comp(index->table));
comp = page_rec_is_comp(insert_rec);
{
......@@ -773,7 +774,7 @@ page_cur_parse_insert_rec(
return(ptr + end_seg_len);
}
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
/* Read from the log the inserted index record end segment which
differs from the cursor record */
......@@ -888,7 +889,7 @@ page_cur_insert_rec_low(
page = page_cur_get_page(cursor);
comp = page_is_comp(page);
ut_ad(index->table->comp == !!comp);
ut_ad(dict_table_is_comp(index->table) == !!comp);
ut_ad(cursor->rec != page_get_supremum_rec(page));
......@@ -1018,7 +1019,7 @@ page_copy_rec_list_to_created_page_write_log(
{
byte* log_ptr;
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
log_ptr = mlog_open_and_write_index(mtr, page, index,
page_is_comp(page)
......@@ -1250,7 +1251,7 @@ page_cur_delete_rec_write_log(
{
byte* log_ptr;
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
log_ptr = mlog_open_and_write_index(mtr, rec, index,
page_rec_is_comp(rec)
......@@ -1342,7 +1343,7 @@ page_cur_delete_rec(
page = page_cur_get_page(cursor);
current_rec = cursor->rec;
ut_ad(rec_offs_validate(current_rec, index, offsets));
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
/* The record must not be the supremum or infimum record. */
ut_ad(current_rec != page_get_supremum_rec(page));
......
......@@ -485,7 +485,8 @@ page_copy_rec_list_end_no_locks(
page_cur_move_to_next(&cur1);
}
ut_a((ibool)!!page_is_comp(new_page) == index->table->comp);
ut_a((ibool)!!page_is_comp(new_page)
== dict_table_is_comp(index->table));
ut_a(page_is_comp(new_page) == page_is_comp(page));
ut_a(mach_read_from_2(new_page + UNIV_PAGE_SIZE - 10) == (ulint)
(page_is_comp(new_page)
......@@ -690,7 +691,7 @@ page_parse_delete_rec_list(
return(ptr);
}
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
if (type == MLOG_LIST_END_DELETE
|| type == MLOG_COMP_LIST_END_DELETE) {
......@@ -854,7 +855,7 @@ page_delete_rec_list_start(
byte type;
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
ut_ad(!!page_is_comp(page) == index->table->comp);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
if (page_is_comp(page)) {
type = MLOG_COMP_LIST_START_DELETE;
......@@ -1350,7 +1351,7 @@ page_print_list(
ulint* offsets = offsets_;
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
ut_a((ibool)!!page_is_comp(page) == index->table->comp);
ut_a((ibool)!!page_is_comp(page) == dict_table_is_comp(index->table));
fprintf(stderr,
"--------------------------------\n"
......@@ -1744,7 +1745,7 @@ page_validate(
ulint* offsets = NULL;
ulint* old_offsets = NULL;
if ((ibool)!!comp != index->table->comp) {
if ((ibool)!!comp != dict_table_is_comp(index->table)) {
fputs("InnoDB: 'compact format' flag mismatch\n", stderr);
goto func_exit2;
}
......
......@@ -1578,7 +1578,7 @@ pars_create_table(
/* As the InnoDB SQL parser is for internal use only,
for creating some system tables, this function will only
create tables in the old (not compact) record format. */
table = dict_mem_table_create(table_sym->name, 0, n_cols, FALSE);
table = dict_mem_table_create(table_sym->name, 0, n_cols, 0);
if (not_fit_in_memory != NULL) {
table->does_not_fit_in_memory = TRUE;
......
......@@ -164,7 +164,7 @@ rec_init_offsets(
rec_offs_make_valid(rec, index, offsets);
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
const byte* nulls;
const byte* lens;
dict_field_t* field;
......@@ -321,7 +321,7 @@ rec_get_offsets_func(
ut_ad(index);
ut_ad(heap);
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
switch (UNIV_EXPECT(rec_get_status(rec),
REC_STATUS_ORDINARY)) {
case REC_STATUS_ORDINARY:
......@@ -446,7 +446,7 @@ rec_get_converted_size_new(
ulint i;
ulint n_fields;
ut_ad(index && dtuple);
ut_ad(index->table->comp);
ut_ad(dict_table_is_comp(index->table));
switch (dtuple_get_info_bits(dtuple) & REC_NEW_STATUS_MASK) {
case REC_STATUS_ORDINARY:
......@@ -592,7 +592,7 @@ rec_set_nth_field_extern_bit_new(
ulint n_fields;
ulint null_mask = 1;
ut_ad(rec && index);
ut_ad(index->table->comp);
ut_ad(dict_table_is_comp(index->table));
ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY);
n_fields = dict_index_get_n_fields(index);
......@@ -674,7 +674,7 @@ rec_set_field_extern_bits(
{
ulint i;
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
for (i = 0; i < n_fields; i++) {
rec_set_nth_field_extern_bit_new(rec, index, vec[i],
TRUE, mtr);
......@@ -841,8 +841,7 @@ rec_convert_dtuple_to_rec_new(
const ulint n_fields = dtuple_get_n_fields(dtuple);
const ulint status = dtuple_get_info_bits(dtuple)
& REC_NEW_STATUS_MASK;
ut_ad(index->table->comp);
ut_ad(dict_table_is_comp(index->table));
ut_ad(n_fields > 0);
/* Try to ensure that the memset() between the for() loops
......@@ -1002,7 +1001,7 @@ rec_convert_dtuple_to_rec(
ut_ad(dtuple_validate(dtuple));
ut_ad(dtuple_check_typed(dtuple));
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
rec = rec_convert_dtuple_to_rec_new(buf, index, dtuple);
} else {
rec = rec_convert_dtuple_to_rec_old(buf, dtuple);
......@@ -1054,7 +1053,7 @@ rec_copy_prefix_to_dtuple(
ut_ad(dtuple_check_typed(tuple));
dtuple_set_info_bits(tuple,
rec_get_info_bits(rec, index->table->comp));
rec_get_info_bits(rec, dict_table_is_comp(index->table)));
for (i = 0; i < n_fields; i++) {
......@@ -1142,7 +1141,7 @@ rec_copy_prefix_to_buf(
UNIV_PREFETCH_RW(*buf);
if (UNIV_UNLIKELY(!index->table->comp)) {
if (!dict_table_is_comp(index->table)) {
ut_ad(rec_validate_old(rec));
return(rec_copy_prefix_to_buf_old(rec, n_fields,
rec_get_field_start_offs(rec, n_fields),
......@@ -1470,7 +1469,7 @@ rec_print(
{
ut_ad(index);
if (!index->table->comp) {
if (!dict_table_is_comp(index->table)) {
rec_print_old(file, rec);
return;
} else {
......
......@@ -256,7 +256,8 @@ row_ins_sec_index_entry_by_modify(
rec = btr_cur_get_rec(cursor);
ut_ad((cursor->index->type & DICT_CLUSTERED) == 0);
ut_ad(rec_get_deleted_flag(rec, cursor->index->table->comp));
ut_ad(rec_get_deleted_flag(rec,
dict_table_is_comp(cursor->index->table)));
/* We know that in the alphabetical ordering, entry and rec are
identified. But in their binary form there may be differences if
......@@ -321,7 +322,8 @@ row_ins_clust_index_entry_by_modify(
rec = btr_cur_get_rec(cursor);
ut_ad(rec_get_deleted_flag(rec, cursor->index->table->comp));
ut_ad(rec_get_deleted_flag(rec,
dict_table_is_comp(cursor->index->table)));
heap = mem_heap_create(1024);
......@@ -969,7 +971,7 @@ row_ins_foreign_check_on_constraint(
goto nonstandard_exit_func;
}
if (rec_get_deleted_flag(clust_rec, table->comp)) {
if (rec_get_deleted_flag(clust_rec, dict_table_is_comp(table))) {
/* This can happen if there is a circular reference of
rows such that cascading delete comes to delete a row
already in the process of being delete marked */
......
......@@ -437,12 +437,12 @@ row_mysql_convert_row_to_innobase(
}
row_mysql_store_col_in_innobase_format(dfield,
prebuilt->ins_upd_rec_buff
+ templ->mysql_col_offset,
TRUE, /* MySQL row format data */
mysql_rec + templ->mysql_col_offset,
templ->mysql_col_len,
prebuilt->table->comp);
prebuilt->ins_upd_rec_buff
+ templ->mysql_col_offset,
TRUE, /* MySQL row format data */
mysql_rec + templ->mysql_col_offset,
templ->mysql_col_len,
dict_table_is_comp(prebuilt->table));
next_column:
;
}
......
......@@ -229,7 +229,8 @@ row_build(
row = dtuple_create(heap, row_len);
dtuple_set_info_bits(row, rec_get_info_bits(rec, table->comp));
dtuple_set_info_bits(row, rec_get_info_bits(rec,
dict_table_is_comp(table)));
n_fields = rec_offs_n_fields(offsets);
......
......@@ -688,7 +688,8 @@ row_sel_get_clust_rec(
|| btr_pcur_get_low_match(&(plan->clust_pcur))
< dict_index_get_n_unique(index)) {
ut_a(rec_get_deleted_flag(rec, plan->table->comp));
ut_a(rec_get_deleted_flag(rec,
dict_table_is_comp(plan->table)));
ut_a(node->read_view);
/* In a rare case it is possible that no clust rec is found
......@@ -765,7 +766,8 @@ row_sel_get_clust_rec(
visit through secondary index records that would not really
exist in our snapshot. */
if ((old_vers || rec_get_deleted_flag(rec, plan->table->comp))
if ((old_vers || rec_get_deleted_flag(rec,
dict_table_is_comp(plan->table)))
&& !row_sel_sec_rec_is_for_clust_rec(rec, plan->index,
clust_rec, index)) {
goto func_exit;
......@@ -1082,7 +1084,7 @@ row_sel_try_search_shortcut(
row_sel_fetch_columns(index, rec, offsets,
UT_LIST_GET_FIRST(plan->columns));
if (rec_get_deleted_flag(rec, plan->table->comp)) {
if (rec_get_deleted_flag(rec, dict_table_is_comp(plan->table))) {
ret = SEL_EXHAUSTED;
goto func_exit;
......@@ -1491,7 +1493,7 @@ row_sel(
goto table_exhausted;
}
if (rec_get_deleted_flag(rec, plan->table->comp)
if (rec_get_deleted_flag(rec, dict_table_is_comp(plan->table))
&& !cons_read_requires_clust_rec) {
/* The record is delete marked: we can skip it if this is
......@@ -1535,7 +1537,8 @@ row_sel(
goto next_rec;
}
if (rec_get_deleted_flag(clust_rec, plan->table->comp)) {
if (rec_get_deleted_flag(clust_rec,
dict_table_is_comp(plan->table))) {
/* The record is delete marked: we can skip it */
......@@ -2203,7 +2206,7 @@ row_sel_convert_mysql_key_to_innobase(
FALSE, /* MySQL key value format col */
key_ptr + data_offset,
data_len,
index->table->comp);
dict_table_is_comp(index->table));
buf += data_len;
}
......@@ -2711,7 +2714,8 @@ row_sel_get_clust_rec_for_mysql(
clustered index record did not exist in the read view of
trx. */
if (!rec_get_deleted_flag(rec, sec_index->table->comp)
if (!rec_get_deleted_flag(rec,
dict_table_is_comp(sec_index->table))
|| prebuilt->select_lock_type != LOCK_NONE) {
ut_print_timestamp(stderr);
fputs(" InnoDB: error clustered record"
......@@ -2796,7 +2800,7 @@ row_sel_get_clust_rec_for_mysql(
if (clust_rec && (old_vers
|| rec_get_deleted_flag(rec,
sec_index->table->comp))
dict_table_is_comp(sec_index->table)))
&& !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
clust_rec, clust_index)) {
clust_rec = NULL;
......@@ -3051,7 +3055,7 @@ row_sel_try_search_shortcut_for_mysql(
return(SEL_RETRY);
}
if (rec_get_deleted_flag(rec, index->table->comp)) {
if (rec_get_deleted_flag(rec, dict_table_is_comp(index->table))) {
return(SEL_EXHAUSTED);
}
......@@ -3095,7 +3099,7 @@ row_search_for_mysql(
cursor 'direction' should be 0. */
{
dict_index_t* index = prebuilt->index;
ibool comp = index->table->comp;
ibool comp = dict_table_is_comp(index->table);
dtuple_t* search_tuple = prebuilt->search_tuple;
btr_pcur_t* pcur = prebuilt->pcur;
trx_t* trx = prebuilt->trx;
......
......@@ -1306,7 +1306,8 @@ row_upd_sec_index_entry(
delete marked if we return after a lock wait in
row_ins_index_entry below */
if (!rec_get_deleted_flag(rec, index->table->comp)) {
if (!rec_get_deleted_flag(rec,
dict_table_is_comp(index->table))) {
err = btr_cur_del_mark_set_sec_rec(0, btr_cur, TRUE,
thr, &mtr);
if (err == DB_SUCCESS && check_ref) {
......@@ -1506,7 +1507,7 @@ row_upd_clust_rec(
btr_cur = btr_pcur_get_btr_cur(pcur);
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
index->table->comp));
dict_table_is_comp(index->table)));
/* Try optimistic updating of the record, keeping changes within
the page; we do not check locks because we assume the x-lock on the
......@@ -1543,7 +1544,7 @@ row_upd_clust_rec(
ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr));
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
index->table->comp));
dict_table_is_comp(index->table)));
err = btr_cur_pessimistic_update(BTR_NO_LOCKING_FLAG, btr_cur,
&big_rec, node->update,
......@@ -2037,7 +2038,7 @@ row_upd_in_place_in_select(
row_upd_eval_new_vals(node->update);
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
btr_cur->index->table->comp));
dict_table_is_comp(btr_cur->index->table)));
ut_ad(node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE);
ut_ad(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE);
......
......@@ -123,7 +123,7 @@ row_vers_impl_x_locked_off_kernel(
comp = page_rec_is_comp(rec);
ut_ad(index->table == clust_index->table);
ut_ad(!!comp == index->table->comp);
ut_ad(!!comp == dict_table_is_comp(index->table));
ut_ad(!comp == !page_rec_is_comp(clust_rec));
/* We look up if some earlier version, which was modified by the trx_id
......@@ -323,7 +323,7 @@ row_vers_old_has_index_entry(
clust_index = dict_table_get_first_index(index->table);
comp = page_rec_is_comp(rec);
ut_ad(!index->table->comp == !comp);
ut_ad(!dict_table_is_comp(index->table) == !comp);
heap = mem_heap_create(1024);
clust_offsets = rec_get_offsets(rec, clust_index, NULL,
ULINT_UNDEFINED, &heap);
......
......@@ -919,7 +919,7 @@ srv_init(void)
/* create dummy table and index for old-style infimum and supremum */
table = dict_mem_table_create("SYS_DUMMY1",
DICT_HDR_SPACE, 1, FALSE);
DICT_HDR_SPACE, 1, 0);
dict_mem_table_add_col(table, "DUMMY", DATA_CHAR,
DATA_ENGLISH | DATA_NOT_NULL, 8, 0);
......@@ -930,7 +930,7 @@ srv_init(void)
srv_sys->dummy_ind1->table = table;
/* create dummy table and index for new-style infimum and supremum */
table = dict_mem_table_create("SYS_DUMMY2",
DICT_HDR_SPACE, 1, TRUE);
DICT_HDR_SPACE, 1, DICT_TF_COMPACT);
dict_mem_table_add_col(table, "DUMMY", DATA_CHAR,
DATA_ENGLISH | DATA_NOT_NULL, 8, 0);
srv_sys->dummy_ind2 = dict_mem_index_create("SYS_DUMMY2",
......
......@@ -455,7 +455,7 @@ trx_undo_page_report_modify(
/* Store first some general parameters to the undo log */
if (update) {
if (rec_get_deleted_flag(rec, table->comp)) {
if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
type_cmpl = TRX_UNDO_UPD_DEL_REC;
} else {
type_cmpl = TRX_UNDO_UPD_EXIST_REC;
......@@ -480,7 +480,7 @@ trx_undo_page_report_modify(
/*----------------------------------------*/
/* Store the state of the info bits */
bits = rec_get_info_bits(rec, table->comp);
bits = rec_get_info_bits(rec, dict_table_is_comp(table));
mach_write_to_1(ptr, bits);
ptr += 1;
......