Commit e8e9fb28 authored by marko's avatar marko

branches/zip: Note that TRX_ID and ROLL_PTR only exist on B-tree leaf pages

of clustered indexes.  Previously, parts of the code assumed that these
columns would exist on all leaf pages.  Simplify the update-in-place of
these columns.

Add inline function dict_index_is_clust() to replace all tests
index->type & DICT_CLUSTERED.

Remove the redo log entry types MLOG_ZIP_WRITE_TRX_ID and
MLOG_ZIP_WRITE_ROLL_PTR, because the modifications to these columns
are covered by logical logging.

Fuse page_zip_write_trx_id() and page_zip_write_roll_ptr() into
page_zip_write_trx_id_and_roll_ptr().

page_zip_dir_add_slot(), page_zip_available(): Add flag "is_clustered",
so that no space will be reserved for TRX_ID and ROLL_PTR on leaf pages
of secondary indexes.

page_zip_apply_log(): Flag an error when val==0 is encoded with two bytes.

page_zip_write_rec(): Add debug assertions that there is enough space
available for the entry before copying the data bytes of the record.
parent e677258a
......@@ -916,7 +916,7 @@ btr_cur_ins_lock_and_undo(
return(err);
}
if ((index->type & DICT_CLUSTERED) && !(index->type & DICT_IBUF)) {
if (dict_index_is_clust(index) && !(index->type & DICT_IBUF)) {
err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP,
thr, index, entry, NULL, 0, NULL,
......@@ -1365,7 +1365,7 @@ btr_cur_upd_lock_and_undo(
rec = btr_cur_get_rec(cursor);
index = cursor->index;
if (!(index->type & DICT_CLUSTERED)) {
if (!dict_index_is_clust(index)) {
/* We do undo logging only when we update a clustered index
record */
return(lock_sec_rec_modify_check_and_lock(flags, rec, index,
......@@ -1597,7 +1597,7 @@ btr_cur_update_in_place(
if the update vector was built for a clustered index, we must
NOT call it if index is secondary */
if (!(index->type & DICT_CLUSTERED)
if (!dict_index_is_clust(index)
|| row_upd_changes_ord_field_binary(NULL, index, update)) {
/* Remove possible hash index pointer to this record */
......@@ -2307,7 +2307,7 @@ btr_cur_del_mark_set_clust_rec(
}
#endif /* UNIV_DEBUG */
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
page_zip = buf_block_get_page_zip(buf_block_align(rec));
......@@ -3516,7 +3516,7 @@ btr_store_big_rec_extern_fields(
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(local_mtr, buf_block_align(rec),
MTR_MEMO_PAGE_X_FIX));
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
space_id = buf_frame_get_space_id(rec);
......
......@@ -551,7 +551,7 @@ dict_build_index_def_step(
node->table = table;
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|| (index->type & DICT_CLUSTERED));
|| dict_index_is_clust(index));
index->id = dict_hdr_get_new_id(DICT_HDR_INDEX_ID);
......@@ -614,7 +614,7 @@ dict_create_index_tree_step(
sys_indexes = dict_sys->sys_indexes;
if (index->type & DICT_CLUSTERED
if (dict_index_is_clust(index)
&& table->type == DICT_TABLE_CLUSTER_MEMBER) {
/* Do not create a new index tree: entries are put to the
......
......@@ -497,7 +497,7 @@ dict_index_get_nth_col_pos(
col = dict_table_get_nth_col(index->table, n);
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
return(col->clust_pos);
}
......@@ -535,7 +535,7 @@ dict_index_contains_col_or_prefix(
ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
return(TRUE);
}
......@@ -1411,8 +1411,8 @@ dict_index_add_to_cache(
index2 = UT_LIST_GET_NEXT(indexes, index2);
}
ut_a(UT_LIST_GET_LEN(table->indexes) == 0
|| (index->type & DICT_CLUSTERED) == 0);
ut_a(!dict_index_is_clust(index)
|| UT_LIST_GET_LEN(table->indexes) == 0);
}
success = dict_index_find_cols(table, index);
......@@ -1426,7 +1426,7 @@ dict_index_add_to_cache(
/* Build the cache internal representation of the index,
containing also the added system fields */
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
new_index = dict_index_build_internal_clust(table, index);
} else {
new_index = dict_index_build_internal_non_clust(table, index);
......@@ -1723,7 +1723,7 @@ dict_index_build_internal_clust(
ulint i;
ut_ad(table && index);
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
......@@ -1897,7 +1897,7 @@ dict_index_build_internal_non_clust(
ulint i;
ut_ad(table && index);
ut_ad(0 == (index->type & DICT_CLUSTERED));
ut_ad(!dict_index_is_clust(index));
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(dict_sys->mutex)));
#endif /* UNIV_SYNC_DEBUG */
......@@ -1907,7 +1907,7 @@ dict_index_build_internal_non_clust(
clust_index = UT_LIST_GET_FIRST(table->indexes);
ut_ad(clust_index);
ut_ad(clust_index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(clust_index));
ut_ad(!(clust_index->type & DICT_UNIVERSAL));
/* Create a new index */
......@@ -3697,7 +3697,7 @@ dict_tree_find_index_low(
ut_ad(index);
table = index->table;
if ((index->type & DICT_CLUSTERED)
if (dict_index_is_clust(index)
&& UNIV_UNLIKELY(table->type != DICT_TABLE_ORDINARY)) {
/* Get the mix id of the record */
......
......@@ -2530,7 +2530,7 @@ ibuf_insert_low(
mtr_t mtr;
mtr_t bitmap_mtr;
ut_a(!(index->type & DICT_CLUSTERED));
ut_a(!dict_index_is_clust(index));
ut_ad(dtuple_check_typed(entry));
ut_a(trx_sys_multiple_tablespace_format);
......@@ -2771,7 +2771,7 @@ ibuf_insert(
ut_a(trx_sys_multiple_tablespace_format);
ut_ad(dtuple_check_typed(entry));
ut_a(!(index->type & DICT_CLUSTERED));
ut_a(!dict_index_is_clust(index));
ut_a(!dict_table_is_zip(index->table));
if (rec_get_converted_size(index, entry)
......
......@@ -422,6 +422,17 @@ dict_table_get_next_index_noninline(
/*================================*/
/* out: index, NULL if none left */
dict_index_t* index); /* in: index */
/************************************************************************
Check whether the index is the clustered index. */
UNIV_INLINE
ulint
dict_index_is_clust(
/*================*/
/* out: nonzero for clustered index,
zero for other indexes */
const dict_index_t* index); /* in: index */
/************************************************************************
Gets the number of user-defined columns in a table in the dictionary
cache. */
......
......@@ -79,6 +79,22 @@ dict_table_get_next_index(
return(UT_LIST_GET_NEXT(indexes, index));
}
/************************************************************************
Determine whether the given index is the clustered index of its table.
Asserts (in debug builds) that the index pointer is valid and that the
index object has been properly initialized. */
UNIV_INLINE
ulint
dict_index_is_clust(
/*================*/
				/* out: nonzero for clustered index,
				zero for other indexes */
	const dict_index_t*	index)	/* in: index */
{
	ulint	clustered;

	ut_ad(index);
	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);

	/* Most indexes are secondary, so hint the compiler that the
	clustered case is the unlikely branch. */
	clustered = index->type & DICT_CLUSTERED;

	return(UNIV_UNLIKELY(clustered));
}
/************************************************************************
Gets the number of user-defined columns in a table in the dictionary
cache. */
......@@ -276,7 +292,7 @@ dict_index_get_n_unique_in_tree(
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(index->cached);
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
return(dict_index_get_n_unique(index));
}
......@@ -336,7 +352,7 @@ dict_index_get_sys_col_pos(
col = dict_table_get_sys_col(index->table, type);
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
return(col->clust_pos);
}
......
......@@ -87,7 +87,7 @@ ibuf_should_try(
a secondary index when we
decide */
{
if (!(index->type & DICT_CLUSTERED)
if (!dict_index_is_clust(index)
&& !dict_table_is_zip(index->table)
&& (ignore_sec_unique || !(index->type & DICT_UNIQUE))
&& ibuf->meter > IBUF_THRESHOLD) {
......
......@@ -68,7 +68,7 @@ lock_clust_rec_some_has_impl(
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
trx_id = row_get_rec_trx_id(rec, index, offsets);
......
......@@ -132,22 +132,16 @@ flag value must give the length also! */
#define MLOG_ZIP_WRITE_NODE_PTR ((byte)47) /* write the node pointer of
a record on a compressed
non-leaf B-tree page */
#define MLOG_ZIP_WRITE_TRX_ID ((byte)48) /* write the trx_id of
a record on a compressed
leaf B-tree page */
#define MLOG_ZIP_WRITE_ROLL_PTR ((byte)49) /* write the roll_ptr of
a record on a compressed
leaf B-tree page */
#define MLOG_ZIP_WRITE_BLOB_PTR ((byte)50) /* write the BLOB pointer
#define MLOG_ZIP_WRITE_BLOB_PTR ((byte)48) /* write the BLOB pointer
of an externally stored column
on a compressed page */
#define MLOG_ZIP_COMPRESS ((byte)51) /* compress a page */
#define MLOG_ZIP_DECOMPRESS ((byte)52) /* decompress a page
#define MLOG_ZIP_COMPRESS ((byte)49) /* compress a page */
#define MLOG_ZIP_DECOMPRESS ((byte)50) /* decompress a page
to undo a compressed page
overflow */
#define MLOG_ZIP_WRITE_HEADER ((byte)53) /* write to compressed page
#define MLOG_ZIP_WRITE_HEADER ((byte)51) /* write to compressed page
header */
#define MLOG_BIGGEST_TYPE ((byte)53) /* biggest value (used in
#define MLOG_BIGGEST_TYPE ((byte)51) /* biggest value (used in
asserts) */
/*******************************************************************
......
......@@ -120,6 +120,8 @@ Add a slot to the dense page directory. */
void
page_zip_dir_add_slot(
/*==================*/
page_zip_des_t* page_zip)/* in/out: compressed page */
page_zip_des_t* page_zip, /* in/out: compressed page */
ulint is_clustered) /* in: nonzero for clustered index,
zero for others */
__attribute__((nonnull));
#endif
......@@ -147,30 +147,18 @@ page_zip_write_node_ptr(
__attribute__((nonnull(1,2)));
/**************************************************************************
Write the trx_id of a record on a B-tree leaf node page. */
Write the trx_id and roll_ptr of a record on a B-tree leaf node page. */
void
page_zip_write_trx_id(
/*==================*/
page_zip_write_trx_id_and_roll_ptr(
/*===============================*/
page_zip_des_t* page_zip,/* in/out: compressed page */
byte* rec, /* in/out: record */
ulint size, /* in: data size of rec */
const ulint* offsets,/* in: rec_get_offsets(rec, index) */
ulint trx_id_col,/* in: column number of TRX_ID in rec */
dulint trx_id, /* in: transaction identifier */
mtr_t* mtr) /* in: mini-transaction, or NULL */
__attribute__((nonnull(1,2)));
/**************************************************************************
Write the roll_ptr of a record on a B-tree leaf node page. */
void
page_zip_write_roll_ptr(
/*====================*/
page_zip_des_t* page_zip,/* in/out: compressed page */
byte* rec, /* in/out: record */
ulint size, /* in: data size of rec */
dulint roll_ptr,/* in: roll_ptr */
mtr_t* mtr) /* in: mini-transaction, or NULL */
__attribute__((nonnull(1,2)));
dulint roll_ptr)/* in: roll_ptr */
__attribute__((nonnull));
/**************************************************************************
Clear a record on the uncompressed and compressed page, if possible. */
......@@ -236,7 +224,9 @@ Add a slot to the dense page directory. */
void
page_zip_dir_add_slot(
/*==================*/
page_zip_des_t* page_zip)/* in/out: compressed page */
page_zip_des_t* page_zip, /* in/out: compressed page */
ulint is_clustered) /* in: nonzero for clustered index,
zero for others */
__attribute__((nonnull));
/**************************************************************************
......
......@@ -65,13 +65,14 @@ In summary, the compressed page looks like this:
(3) Compressed page data
(4) Page modification log (page_zip->m_start..page_zip->m_end)
(5) Empty zero-filled space
(6) BLOB pointers
(6) BLOB pointers (on leaf pages)
- BTR_EXTERN_FIELD_REF_SIZE for each externally stored column
- in descending collation order
(7) Uncompressed columns of user records, n_dense * uncompressed_size bytes,
- indexed by heap_no
- DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN if page_is_leaf(page_zip->data)
- REC_NODE_PTR_SIZE otherwise
- DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN for leaf pages of clustered indexes
- REC_NODE_PTR_SIZE for non-leaf pages
- 0 otherwise
(8) dense page directory, stored backwards
- n_dense = n_heap - 2
- existing records in ascending collation order
......@@ -102,6 +103,8 @@ page_zip_available(
ulint length, /* in: combined size of the record */
ulint is_leaf,/* in: nonzero=leaf node,
zero=node pointer page */
ulint is_clustered,/* in: nonzero=clustered index,
zero=secondary index */
ulint create) /* in: nonzero=add the record to
the heap */
__attribute__((warn_unused_result, nonnull, pure));
......@@ -161,7 +164,8 @@ page_zip_alloc(
ut_ad(page_is_comp((page_t*) page));
ut_ad(page_zip_validate(page_zip, page));
if (page_zip_available(page_zip, length, page_is_leaf(page), create)) {
if (page_zip_available(page_zip, length, page_is_leaf(page),
dict_index_is_clust(index), create)) {
return(TRUE);
}
......@@ -177,8 +181,8 @@ page_zip_alloc(
}
/* Check if there is enough space available after compression. */
return(page_zip_available(page_zip, length,
page_is_leaf(page), create));
return(page_zip_available(page_zip, length, page_is_leaf(page),
dict_index_is_clust(index), create));
}
/**************************************************************************
......@@ -193,6 +197,8 @@ page_zip_available(
ulint length, /* in: combined size of the record */
ulint is_leaf,/* in: nonzero=leaf node,
zero=node pointer page */
ulint is_clustered,/* in: nonzero=clustered index,
zero=secondary index */
ulint create) /* in: nonzero=add the record to
the heap */
{
......@@ -202,12 +208,14 @@ page_zip_available(
ut_ad(page_zip_simple_validate(page_zip));
ut_ad(length > REC_N_NEW_EXTRA_BYTES);
if (is_leaf) {
if (UNIV_UNLIKELY(!is_leaf)) {
uncompressed_size = PAGE_ZIP_DIR_SLOT_SIZE
+ REC_NODE_PTR_SIZE;
} else if (UNIV_UNLIKELY(is_clustered)) {
uncompressed_size = PAGE_ZIP_DIR_SLOT_SIZE
+ DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
} else {
uncompressed_size = PAGE_ZIP_DIR_SLOT_SIZE
+ REC_NODE_PTR_SIZE;
uncompressed_size = PAGE_ZIP_DIR_SLOT_SIZE;
}
trailer_len = (page_dir_get_n_heap((page_t*) page_zip->data) - 2)
......
......@@ -23,7 +23,7 @@ row_get_rec_trx_id(
{
ulint offset;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
offset = index->trx_id_offset;
......@@ -48,7 +48,7 @@ row_get_rec_roll_ptr(
{
ulint offset;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
offset = index->trx_id_offset;
......
......@@ -113,7 +113,7 @@ row_upd_rec_sys_fields(
trx_t* trx, /* in: transaction */
dulint roll_ptr)/* in: roll ptr of the undo log record */
{
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
#ifdef UNIV_SYNC_DEBUG
ut_ad(!buf_block_align(rec)->is_hashed
......@@ -121,12 +121,9 @@ row_upd_rec_sys_fields(
#endif /* UNIV_SYNC_DEBUG */
if (UNIV_LIKELY_NULL(page_zip)) {
page_zip_write_trx_id(
page_zip, rec, rec_offs_data_size(offsets),
trx->id, NULL);
page_zip_write_roll_ptr(
page_zip, rec, rec_offs_data_size(offsets),
roll_ptr, NULL);
ulint pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
page_zip_write_trx_id_and_roll_ptr(page_zip, rec, offsets,
pos, trx->id, roll_ptr);
} else {
ulint offset = index->trx_id_offset;
......@@ -134,6 +131,9 @@ row_upd_rec_sys_fields(
offset = row_get_trx_id_offset(rec, index, offsets);
}
#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
#endif
trx_write_trx_id(rec + offset, trx->id);
trx_write_roll_ptr(rec + offset + DATA_TRX_ID_LEN, roll_ptr);
}
......
......@@ -516,7 +516,7 @@ lock_clust_rec_cons_read_sees(
{
dulint trx_id;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
......@@ -552,7 +552,7 @@ lock_sec_rec_cons_read_sees(
UT_NOT_USED(index);
ut_ad(!(index->type & DICT_CLUSTERED));
ut_ad(!dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
/* NOTE that we might call this function while holding the search
......@@ -1667,7 +1667,7 @@ lock_sec_rec_some_has_impl_off_kernel(
#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
ut_ad(!(index->type & DICT_CLUSTERED));
ut_ad(!dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
......@@ -4588,7 +4588,7 @@ lock_rec_queue_validate(
}
if (!index);
else if (index->type & DICT_CLUSTERED) {
else if (dict_index_is_clust(index)) {
impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets);
......@@ -4881,7 +4881,7 @@ lock_rec_insert_check_and_lock(
lock_mutex_exit_kernel();
if (!(index->type & DICT_CLUSTERED)) {
if (!dict_index_is_clust(index)) {
buf_block_t* block = buf_block_align(rec);
/* Update the page max trx id field */
......@@ -4921,7 +4921,7 @@ lock_rec_insert_check_and_lock(
lock_mutex_exit_kernel();
if ((err == DB_SUCCESS) && !(index->type & DICT_CLUSTERED)) {
if ((err == DB_SUCCESS) && !dict_index_is_clust(index)) {
buf_block_t* block = buf_block_align(rec);
/* Update the page max trx id field */
......@@ -4970,7 +4970,7 @@ lock_rec_convert_impl_to_expl(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
impl_trx = lock_clust_rec_some_has_impl(rec, index, offsets);
} else {
impl_trx = lock_sec_rec_some_has_impl_off_kernel(
......@@ -5022,7 +5022,7 @@ lock_clust_rec_modify_check_and_lock(
ulint err;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
if (flags & BTR_NO_LOCKING_FLAG) {
......@@ -5072,7 +5072,7 @@ lock_sec_rec_modify_check_and_lock(
return(DB_SUCCESS);
}
ut_ad(!(index->type & DICT_CLUSTERED));
ut_ad(!dict_index_is_clust(index));
/* Another transaction cannot have an implicit lock on the record,
because when we come here, we already have modified the clustered
......@@ -5140,7 +5140,7 @@ lock_sec_rec_read_check_and_lock(
{
ulint err;
ut_ad(!(index->type & DICT_CLUSTERED));
ut_ad(!dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
......@@ -5206,7 +5206,7 @@ lock_clust_rec_read_check_and_lock(
{
ulint err;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
ut_ad(gap_mode == LOCK_ORDINARY || gap_mode == LOCK_GAP
|| gap_mode == LOCK_REC_NOT_GAP);
......
......@@ -894,8 +894,6 @@ recv_parse_or_apply_log_rec_body(
ULINT_UNDEFINED);
break;
case MLOG_ZIP_WRITE_NODE_PTR:
case MLOG_ZIP_WRITE_TRX_ID:
case MLOG_ZIP_WRITE_ROLL_PTR:
case MLOG_ZIP_WRITE_HEADER:
ut_error; /* TODO */
break;
......
......@@ -998,7 +998,8 @@ page_cur_insert_rec_low(
}
if (UNIV_LIKELY_NULL(page_zip)) {
page_zip_dir_add_slot(page_zip);
page_zip_dir_add_slot(page_zip,
dict_index_is_clust(index));
}
}
......
This diff is collapsed.
......@@ -367,14 +367,14 @@ opt_calc_index_goodness(
if (goodness >= 4 * dict_index_get_n_unique(index)) {
goodness += 1024;
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
goodness += 1024;
}
}
/* We have to test for goodness here, as last_op may note be set */
if (goodness && index->type & DICT_CLUSTERED) {
if (goodness && dict_index_is_clust(index)) {
goodness++;
}
......@@ -593,7 +593,7 @@ opt_search_plan_for_table(
best_last_op);
}
if ((best_index->type & DICT_CLUSTERED)
if (dict_index_is_clust(best_index)
&& (plan->n_exact_match >= dict_index_get_n_unique(best_index))) {
plan->unique_search = TRUE;
......@@ -602,7 +602,7 @@ opt_search_plan_for_table(
}
if ((table->type != DICT_TABLE_ORDINARY)
&& (best_index->type & DICT_CLUSTERED)) {
&& dict_index_is_clust(best_index)) {
plan->mixed_index = TRUE;
......@@ -934,7 +934,7 @@ opt_find_all_cols(
= dict_index_get_nth_col_pos(
dict_table_get_first_index(index->table),
sym_node->col_no);
if (!(index->type & DICT_CLUSTERED)) {
if (!dict_index_is_clust(index)) {
ut_a(plan);
......@@ -1070,7 +1070,7 @@ opt_clust_access(
plan->no_prefetch = FALSE;
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
plan->clust_map = NULL;
plan->clust_ref = NULL;
......
......@@ -992,7 +992,7 @@ pars_update_statement(
plan->no_prefetch = TRUE;
if (!((plan->index)->type & DICT_CLUSTERED)) {
if (!dict_index_is_clust(plan->index)) {
plan->must_get_clust = TRUE;
......
......@@ -255,7 +255,7 @@ row_ins_sec_index_entry_by_modify(
rec = btr_cur_get_rec(cursor);
ut_ad((cursor->index->type & DICT_CLUSTERED) == 0);
ut_ad(!dict_index_is_clust(cursor->index));
ut_ad(rec_get_deleted_flag(rec,
dict_table_is_comp(cursor->index->table)));
......@@ -319,7 +319,7 @@ row_ins_clust_index_entry_by_modify(
upd_t* update;
ulint err;
ut_ad(cursor->index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(cursor->index));
*big_rec = NULL;
......@@ -913,7 +913,7 @@ row_ins_foreign_check_on_constraint(
rec = btr_pcur_get_rec(pcur);
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
/* pcur is already positioned in the clustered index of
the child table */
......@@ -1134,7 +1134,7 @@ row_ins_set_shared_rec_lock(
ut_ad(rec_offs_validate(rec, index, offsets));
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
err = lock_clust_rec_read_check_and_lock(0,
rec, index, offsets, LOCK_S, type, thr);
} else {
......@@ -1165,7 +1165,7 @@ row_ins_set_exclusive_rec_lock(
ut_ad(rec_offs_validate(rec, index, offsets));
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
err = lock_clust_rec_read_check_and_lock(0,
rec, index, offsets, LOCK_X, type, thr);
} else {
......@@ -1613,7 +1613,7 @@ row_ins_dupl_error_with_rec(
/* In a unique secondary index we allow equal key values if they
contain SQL NULLs */
if (!(index->type & DICT_CLUSTERED)) {
if (!dict_index_is_clust(index)) {
for (i = 0; i < n_unique; i++) {
if (UNIV_SQL_NULL == dfield_get_len(
......@@ -1792,7 +1792,7 @@ row_ins_duplicate_error_in_clust(
UT_NOT_USED(mtr);
ut_a(cursor->index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(cursor->index));
ut_ad(cursor->index->type & DICT_UNIQUE);
/* NOTE: For unique non-clustered indexes there may be any number
......@@ -1891,7 +1891,7 @@ row_ins_duplicate_error_in_clust(
mem_heap_free(heap);
}
ut_a(!(cursor->index->type & DICT_CLUSTERED));
ut_a(!dict_index_is_clust(cursor->index));
/* This should never happen */
}
......@@ -2033,7 +2033,7 @@ row_ins_index_entry_low(
if (index->type & DICT_UNIQUE && (cursor.up_match >= n_unique
|| cursor.low_match >= n_unique)) {
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
/* Note that the following may return also
DB_LOCK_WAIT */
......@@ -2079,7 +2079,7 @@ row_ins_index_entry_low(
btr_cur_position(index, rec, &cursor);
}
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
err = row_ins_clust_index_entry_by_modify(mode,
&cursor, &big_rec,
entry,
......
......@@ -1500,7 +1500,7 @@ row_unlock_for_mysql(
reset locks on clust_pcur. The values in clust_pcur may be
garbage! */
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
goto func_exit;
}
......
......@@ -43,7 +43,7 @@ row_get_trx_id_offset(
byte* field;
ulint len;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
......@@ -167,7 +167,7 @@ row_build(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
ut_ad(index && rec && heap);
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
if (!offsets) {
offsets = rec_get_offsets(rec, index, offsets_,
......@@ -624,7 +624,7 @@ row_get_clust_rec(
ibool found;
rec_t* clust_rec;
ut_ad((index->type & DICT_CLUSTERED) == 0);
ut_ad(!dict_index_is_clust(index));
table = index->table;
......
......@@ -295,7 +295,7 @@ row_sel_fetch_columns(
ut_ad(rec_offs_validate(rec, index, offsets));
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
index_type = SYM_CLUST_FIELD_NO;
} else {
index_type = SYM_SEC_FIELD_NO;
......@@ -814,7 +814,7 @@ sel_set_rec_lock(
}
}
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
err = lock_clust_rec_read_check_and_lock(0,
rec, index, offsets, mode, type, thr);
} else {
......@@ -1067,7 +1067,7 @@ row_sel_try_search_shortcut(
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
if (!lock_clust_rec_cons_read_sees(rec, index, offsets,
node->read_view)) {
ret = SEL_RETRY;
......@@ -1435,7 +1435,7 @@ row_sel(
/* This is a non-locking consistent read: if necessary, fetch
a previous version of the record */
if (index->type & DICT_CLUSTERED) {
if (dict_index_is_clust(index)) {
if (!lock_clust_rec_cons_read_sees(rec, index, offsets,
node->read_view)) {
......@@ -3015,7 +3015,7 @@ row_sel_try_search_shortcut_for_mysql(
trx_t* trx = prebuilt->trx;
rec_t* rec;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(!prebuilt->templ_contains_blob);
btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
......@@ -3296,7 +3296,7 @@ stderr);
&& index->type & DICT_UNIQUE
&& dtuple_get_n_fields(search_tuple)
== dict_index_get_n_unique(index)
&& (index->type & DICT_CLUSTERED
&& (dict_index_is_clust(index)
|| !dtuple_contains_null(search_tuple))) {
/* Note above that a UNIQUE secondary index can contain many
......@@ -3333,7 +3333,7 @@ stderr);
if (UNIV_UNLIKELY(direction == 0)
&& unique_search
&& index->type & DICT_CLUSTERED
&& dict_index_is_clust(index)
&& !prebuilt->templ_contains_blob
&& !prebuilt->used_in_HANDLER
&& (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) {
......
......@@ -309,17 +309,20 @@ row_upd_rec_sys_fields_in_recovery(
dulint trx_id, /* in: transaction id */
dulint roll_ptr)/* in: roll ptr of the undo log record */
{
ut_ad(rec_offs_validate(rec, NULL, offsets));
if (UNIV_LIKELY_NULL(page_zip)) {
page_zip_write_trx_id(page_zip, rec,
rec_offs_size(offsets), trx_id, NULL);
page_zip_write_roll_ptr(page_zip, rec,
rec_offs_size(offsets), roll_ptr, NULL);
page_zip_write_trx_id_and_roll_ptr(
page_zip, rec, offsets, pos, trx_id, roll_ptr);
} else {
byte* field;
ulint len;
field = rec_get_nth_field(rec, offsets, pos, &len);
ut_ad(len == DATA_TRX_ID_LEN);
#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
#endif
trx_write_trx_id(field, trx_id);
trx_write_roll_ptr(field + DATA_TRX_ID_LEN, roll_ptr);
}
......@@ -343,7 +346,7 @@ row_upd_index_entry_sys_field(
byte* field;
ulint pos;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
pos = dict_index_get_sys_col_pos(index, type);
......@@ -492,7 +495,7 @@ row_upd_write_sys_vals_to_log(
in mlog */
mtr_t* mtr __attribute__((unused))) /* in: mtr */
{
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(mtr);
log_ptr += mach_write_compressed(log_ptr,
......@@ -743,7 +746,7 @@ row_upd_build_sec_rec_difference_binary(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
/* This function is used only for a secondary index */
ut_a(0 == (index->type & DICT_CLUSTERED));
ut_a(!dict_index_is_clust(index));
update = upd_create(dtuple_get_n_fields(entry), heap);
......@@ -821,7 +824,7 @@ row_upd_build_difference_binary(
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
/* This function is used only for a clustered index */
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
update = upd_create(dtuple_get_n_fields(entry), heap);
......@@ -1380,7 +1383,7 @@ row_upd_sec_step(
ut_ad((node->state == UPD_NODE_UPDATE_ALL_SEC)
|| (node->state == UPD_NODE_UPDATE_SOME_SEC));
ut_ad(!(node->index->type & DICT_CLUSTERED));
ut_ad(!dict_index_is_clust(node->index));
if (node->state == UPD_NODE_UPDATE_ALL_SEC
|| row_upd_changes_ord_field_binary(node->row, node->index,
......@@ -1420,7 +1423,7 @@ row_upd_clust_rec_by_insert(
ulint err;
ut_ad(node);
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
trx = thr_get_trx(thr);
table = node->table;
......@@ -1523,7 +1526,7 @@ row_upd_clust_rec(
ulint err;
ut_ad(node);
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
pcur = node->pcur;
btr_cur = btr_pcur_get_btr_cur(pcur);
......@@ -1620,7 +1623,7 @@ row_upd_del_mark_clust_rec(
ulint err;
ut_ad(node);
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(node->is_delete);
pcur = node->pcur;
......
......@@ -426,7 +426,7 @@ row_vers_build_for_consistent_read(
byte* buf;
ulint err;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
|| mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
......@@ -555,7 +555,7 @@ row_vers_build_for_semi_consistent_read(
ulint err;
dulint rec_trx_id;
ut_ad(index->type & DICT_CLUSTERED);
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains(mtr, buf_block_align(rec), MTR_MEMO_PAGE_X_FIX)
|| mtr_memo_contains(mtr, buf_block_align(rec),
MTR_MEMO_PAGE_S_FIX));
......
......@@ -339,7 +339,7 @@ trx_undo_rec_get_row_ref(
ulint i;
ut_ad(index && ptr && ref && heap);
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
ref_len = dict_index_get_n_unique(index);
......@@ -376,7 +376,7 @@ trx_undo_rec_skip_row_ref(
ulint i;
ut_ad(index && ptr);
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
ref_len = dict_index_get_n_unique(index);
......@@ -429,7 +429,7 @@ trx_undo_page_report_modify(
byte* type_cmpl_ptr;
ulint i;
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_TYPE) == TRX_UNDO_UPDATE);
......@@ -796,7 +796,7 @@ trx_undo_update_rec_get_update(
ulint field_no;
ulint i;
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
if (type != TRX_UNDO_DEL_MARK_REC) {
ptr = trx_undo_update_rec_get_n_upd_fields(ptr, &n_fields);
......@@ -1023,7 +1023,7 @@ trx_undo_report_row_operation(
ulint* offsets = offsets_;
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
ut_a(index->type & DICT_CLUSTERED);
ut_a(dict_index_is_clust(index));
if (flags & BTR_NO_UNDO_LOG_FLAG) {
......@@ -1287,7 +1287,7 @@ trx_undo_prev_version_build(
MTR_MEMO_PAGE_X_FIX));
ut_ad(rec_offs_validate(rec, index, offsets));
if (!(index->type & DICT_CLUSTERED)) {
if (!dict_index_is_clust(index)) {
fprintf(stderr, "InnoDB: Error: trying to access"
" update undo rec for non-clustered index %s\n"
"InnoDB: Submit a detailed bug report to"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment