Commit bfb3d327 authored by Marko Mäkelä's avatar Marko Mäkelä

Various fixes for ROW_FORMAT=REDUNDANT

rec_get_offsets(): Use the 'leaf' parameter for non-debug purposes.
Initialize all fields for ROW_FORMAT=REDUNDANT records that precede
an instant ADD COLUMN.

rec_offs_make_valid(): Add the parameter 'leaf'.

rec_copy_prefix_to_dtuple(): Assert that the tuple is only built
on the core fields. Instant ADD COLUMN only applies to the
clustered index, and we should never build a search key that has
more than the PRIMARY KEY and possibly DB_TRX_ID,DB_ROLL_PTR.
All these columns are always present.

dict_index_build_data_tuple(): Remove assertions that would be
duplicated in rec_copy_prefix_to_dtuple().

rec_get_n_nullable(): Get the number of nullable columns.

rec_init_offsets(): Support ROW_FORMAT=REDUNDANT records that
were written before instant ADD COLUMN.

cmp_rec_rec_with_match(): Implement comparison of MIN_REC_FLAG records.
parent 84167947
......@@ -170,13 +170,14 @@ PageBulk::insert(
ut_ad(m_heap != NULL);
rec_size = rec_offs_size(offsets);
ut_d(const bool is_leaf = page_rec_is_leaf(m_cur_rec));
#ifdef UNIV_DEBUG
/* Check whether records are in order. */
if (!page_rec_is_infimum(m_cur_rec)) {
rec_t* old_rec = m_cur_rec;
ulint* old_offsets = rec_get_offsets(
old_rec, m_index, NULL, page_rec_is_leaf(old_rec),
old_rec, m_index, NULL, is_leaf,
ULINT_UNDEFINED, &m_heap);
ut_ad(cmp_rec_rec(rec, old_rec, offsets, old_offsets, m_index)
......@@ -188,7 +189,7 @@ PageBulk::insert(
/* 1. Copy the record to page. */
rec_t* insert_rec = rec_copy(m_heap_top, rec, offsets);
rec_offs_make_valid(insert_rec, m_index, offsets);
rec_offs_make_valid(insert_rec, m_index, is_leaf, offsets);
/* 2. Insert the record in the linked list. */
rec_t* next_rec = page_rec_get_next(m_cur_rec);
......
......@@ -3666,7 +3666,8 @@ btr_cur_update_alloc_zip_func(
goto out_of_space;
}
rec_offs_make_valid(page_cur_get_rec(cursor), index, offsets);
rec_offs_make_valid(page_cur_get_rec(cursor), index,
page_is_leaf(page), offsets);
/* After recompressing a page, we must make sure that the free
bits in the insert buffer bitmap will not exceed the free
......@@ -4442,11 +4443,12 @@ btr_cur_pessimistic_update(
}
bool adjust = big_rec_vec && (flags & BTR_KEEP_POS_FLAG);
ut_ad(!adjust || page_is_leaf(page));
if (btr_cur_compress_if_useful(cursor, adjust, mtr)) {
if (adjust) {
rec_offs_make_valid(
page_cursor->rec, index, *offsets);
rec_offs_make_valid(page_cursor->rec, index,
true, *offsets);
}
} else if (!dict_index_is_clust(index)
&& page_is_leaf(page)) {
......@@ -6166,7 +6168,7 @@ btr_estimate_number_of_different_key_vals(
page = btr_cur_get_page(&cursor);
rec = page_rec_get_next(page_get_infimum_rec(page));
ut_d(const bool is_leaf = page_is_leaf(page));
const bool is_leaf = page_is_leaf(page);
if (!page_rec_is_supremum(rec)) {
not_empty_flag = 1;
......@@ -6727,8 +6729,8 @@ struct btr_blob_log_check_t {
*m_block = btr_pcur_get_block(m_pcur);
*m_rec = btr_pcur_get_rec(m_pcur);
ut_d(rec_offs_make_valid(
*m_rec, index, const_cast<ulint*>(m_offsets)));
rec_offs_make_valid(*m_rec, index, true,
const_cast<ulint*>(m_offsets));
ut_ad(m_mtr->memo_contains_page_flagged(
*m_rec,
......
......@@ -5733,21 +5733,14 @@ dict_index_copy_rec_order_prefix(
@param[in,out] heap memory heap for allocation
@return own: data tuple */
dtuple_t*
dict_index_build_data_tuple_func(
dict_index_build_data_tuple(
const rec_t* rec,
const dict_index_t* index,
#ifdef UNIV_DEBUG
bool leaf,
#endif /* UNIV_DEBUG */
ulint n_fields,
mem_heap_t* heap)
{
dtuple_t* tuple;
ut_ad(dict_table_is_comp(index->table)
|| n_fields <= rec_get_n_fields_old(rec));
tuple = dtuple_create(heap, n_fields);
dtuple_t* tuple = dtuple_create(heap, n_fields);
dict_index_copy_types(tuple, index, n_fields);
......
......@@ -85,7 +85,7 @@ rtr_page_split_initialize_nodes(
stop = task + n_recs;
rec = page_rec_get_next(page_get_infimum_rec(page));
ut_d(const bool is_leaf = page_is_leaf(page));
const bool is_leaf = page_is_leaf(page);
*offsets = rec_get_offsets(rec, cursor->index, *offsets, is_leaf,
n_uniq, &heap);
......
......@@ -1495,22 +1495,13 @@ dict_index_copy_rec_order_prefix(
@param[in,out] heap memory heap for allocation
@return own: data tuple */
dtuple_t*
dict_index_build_data_tuple_func(
dict_index_build_data_tuple(
const rec_t* rec,
const dict_index_t* index,
#ifdef UNIV_DEBUG
bool leaf,
#endif /* UNIV_DEBUG */
ulint n_fields,
mem_heap_t* heap)
MY_ATTRIBUTE((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
# define dict_index_build_data_tuple(rec, index, leaf, n_fields, heap) \
dict_index_build_data_tuple_func(rec, index, leaf, n_fields, heap)
#else /* UNIV_DEBUG */
# define dict_index_build_data_tuple(rec, index, leaf, n_fields, heap) \
dict_index_build_data_tuple_func(rec, index, n_fields, heap)
#endif /* UNIV_DEBUG */
/*********************************************************************//**
Gets the space id of the root of the index tree.
......
......@@ -466,9 +466,7 @@ rec_get_offsets_func(
const rec_t* rec,
const dict_index_t* index,
ulint* offsets,
#ifdef UNIV_DEBUG
bool leaf,
#endif /* UNIV_DEBUG */
ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */
......@@ -478,7 +476,7 @@ rec_get_offsets_func(
#ifdef UNIV_DEBUG
MY_ATTRIBUTE((nonnull(1,2,6,8),warn_unused_result));
#else /* UNIV_DEBUG */
MY_ATTRIBUTE((nonnull(1,2,5),warn_unused_result));
MY_ATTRIBUTE((nonnull(1,2,6),warn_unused_result));
#endif /* UNIV_DEBUG */
#ifdef UNIV_DEBUG
......@@ -486,7 +484,7 @@ rec_get_offsets_func(
rec_get_offsets_func(rec,index,offsets,leaf,n,__FILE__,__LINE__,heap)
#else /* UNIV_DEBUG */
# define rec_get_offsets(rec, index, offsets, leaf, n, heap) \
rec_get_offsets_func(rec, index, offsets, n, heap)
rec_get_offsets_func(rec, index, offsets, leaf, n, heap)
#endif /* UNIV_DEBUG */
/******************************************************//**
......@@ -506,32 +504,31 @@ rec_get_offsets_reverse(
offsets[0] allocated elements */
MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
/************************************************************//**
Validates offsets returned by rec_get_offsets().
@return TRUE if valid */
UNIV_INLINE
ibool
/** Validate offsets returned by rec_get_offsets().
@param[in] rec record, or NULL
@param[in] index the index that the record belongs in, or NULL
@param[in,out] offsets the offsets of the record
@return true */
bool
rec_offs_validate(
/*==============*/
const rec_t* rec, /*!< in: record or NULL */
const dict_index_t* index, /*!< in: record descriptor or NULL */
const ulint* offsets)/*!< in: array returned by
rec_get_offsets() */
const rec_t* rec,
const dict_index_t* index,
const ulint* offsets)
MY_ATTRIBUTE((nonnull(3), warn_unused_result));
/************************************************************//**
Updates debug data in offsets, in order to avoid bogus
rec_offs_validate() failures. */
UNIV_INLINE
/** Update debug data in offsets, in order to tame rec_offs_validate().
@param[in] rec record
@param[in] index the index that the record belongs in
@param[in] leaf whether the record resides in a leaf page
@param[in,out] offsets offsets from rec_get_offsets() to adjust */
void
rec_offs_make_valid(
/*================*/
const rec_t* rec, /*!< in: record */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in: array returned by
rec_get_offsets() */
const rec_t* rec,
const dict_index_t* index,
bool leaf,
ulint* offsets)
MY_ATTRIBUTE((nonnull));
#else
# define rec_offs_make_valid(rec, index, offsets) ((void) 0)
# define rec_offs_make_valid(rec, index, leaf, offsets)
#endif /* UNIV_DEBUG */
/************************************************************//**
......@@ -991,23 +988,14 @@ The fields are copied into the memory heap.
@param[in] n_fields number of fields to copy
@param[in,out] heap memory heap */
void
rec_copy_prefix_to_dtuple_func(
rec_copy_prefix_to_dtuple(
dtuple_t* tuple,
const rec_t* rec,
const dict_index_t* index,
#ifdef UNIV_DEBUG
bool is_leaf,
#endif /* UNIV_DEBUG */
ulint n_fields,
mem_heap_t* heap)
MY_ATTRIBUTE((nonnull));
#ifdef UNIV_DEBUG
# define rec_copy_prefix_to_dtuple(tuple,rec,index,leaf,n_fields,heap) \
rec_copy_prefix_to_dtuple_func(tuple,rec,index,leaf,n_fields,heap)
#else /* UNIV_DEBUG */
# define rec_copy_prefix_to_dtuple(tuple,rec,index,leaf,n_fields,heap) \
rec_copy_prefix_to_dtuple_func(tuple,rec,index,n_fields,heap)
#endif /* UNIV_DEBUG */
/***************************************************************//**
Validates the consistency of a physical record.
@return TRUE if ok */
......
......@@ -29,16 +29,16 @@ Created 5/30/1994 Heikki Tuuri
#include "dict0boot.h"
#include "btr0types.h"
/* Compact flag ORed to the extra size returned by rec_get_offsets() */
#define REC_OFFS_COMPACT ((ulint) 1 << 31)
/* SQL NULL flag in offsets returned by rec_get_offsets() */
#define REC_OFFS_SQL_NULL ((ulint) 1 << 31)
/* External flag in offsets returned by rec_get_offsets() */
#define REC_OFFS_EXTERNAL ((ulint) 1 << 30)
/* Default value flag in offsets returned by rec_get_offsets() */
#define REC_OFFS_DEFAULT ((ulint) 1 << 29)
/* Mask for offsets returned by rec_get_offsets() */
#define REC_OFFS_MASK (REC_OFFS_DEFAULT - 1)
/** Compact flag ORed to the extra size returned by rec_get_offsets() */
const ulint REC_OFFS_COMPACT = ~(ulint(~0) >> 1);
/** SQL NULL flag in offsets returned by rec_get_offsets() */
const ulint REC_OFFS_SQL_NULL = REC_OFFS_COMPACT;
/** External flag in offsets returned by rec_get_offsets() */
const ulint REC_OFFS_EXTERNAL = REC_OFFS_COMPACT >> 1;
/** Default value flag in offsets returned by rec_get_offsets() */
const ulint REC_OFFS_DEFAULT = REC_OFFS_COMPACT >> 2;
/** Mask for offsets returned by rec_get_offsets() */
const ulint REC_OFFS_MASK = REC_OFFS_DEFAULT - 1;
/* Offsets of the bit-fields in an old-style record. NOTE! In the table the
most significant bytes and bits are written below less significant.
......@@ -987,83 +987,6 @@ rec_offs_n_fields(
return(n_fields);
}
/************************************************************//**
Validates offsets returned by rec_get_offsets().
@return TRUE if valid */
UNIV_INLINE
ibool
rec_offs_validate(
/*==============*/
	const rec_t*		rec,	/*!< in: record or NULL */
	const dict_index_t*	index,	/*!< in: record descriptor or NULL */
	const ulint*		offsets)/*!< in: array returned by
					rec_get_offsets() */
{
	ulint	i	= rec_offs_n_fields(offsets);
	ulint	last	= ULINT_MAX;
	/* REC_OFFS_COMPACT in the extra-size word records whether the
	offsets were computed for a ROW_FORMAT=COMPACT family record. */
	ulint	comp	= *rec_offs_base(offsets) & REC_OFFS_COMPACT;

	if (rec) {
		/* offsets[2] caches the record pointer these offsets
		were computed for (debug bookkeeping set by
		rec_offs_make_valid()). */
		ut_ad((ulint) rec == offsets[2]);
		if (!comp) {
			/* ROW_FORMAT=REDUNDANT stores the field count
			in the record header; the offsets array must
			not claim more fields than that. */
			ut_a(rec_get_n_fields_old(rec) >= i);
		}
	}
	if (index) {
		ulint	max_n_fields;
		/* offsets[3] caches the index pointer (debug field). */
		ut_ad((ulint) index == offsets[3]);
		max_n_fields = ut_max(
			dict_index_get_n_fields(index),
			dict_index_get_n_unique_in_tree(index) + 1);
		if (comp && rec) {
			/* For COMPACT records, tighten the bound based
			on the record type encoded in the status bits. */
			switch (rec_get_status(rec)) {
			case REC_STATUS_ORDINARY:
				break;
			case REC_STATUS_NODE_PTR:
				/* Node pointers carry the unique prefix
				plus the child page number field. */
				max_n_fields = dict_index_get_n_unique_in_tree(
					index) + 1;
				break;
			case REC_STATUS_INFIMUM:
			case REC_STATUS_SUPREMUM:
				/* Pseudo-records carry a single field. */
				max_n_fields = 1;
				break;
			default:
				ut_error;
			}
		}
		/* index->n_def == 0 for dummy indexes if !comp */
		ut_a(!comp || index->n_def);
		ut_a(!index->n_def || i <= max_n_fields);
	}
	/* End offsets must be non-decreasing in field order; scan
	backwards and check each field ends at or before its successor. */
	while (i--) {
		ulint	curr = rec_offs_base(offsets)[1 + i] & REC_OFFS_MASK;
		ut_a(curr <= last);
		last = curr;
	}
	return(TRUE);
}
#ifdef UNIV_DEBUG
/************************************************************//**
Updates debug data in offsets, in order to avoid bogus
rec_offs_validate() failures. */
UNIV_INLINE
void
rec_offs_make_valid(
/*================*/
	const rec_t*		rec,	/*!< in: record */
	const dict_index_t*	index,	/*!< in: record descriptor */
	ulint*			offsets)/*!< in: array returned by
					rec_get_offsets() */
{
	ut_ad(rec);
	ut_ad(index);
	ut_ad(offsets);
	/* The record must supply at least as many fields as the
	offsets array describes. */
	ut_ad(rec_get_n_fields(rec, index) >= rec_offs_n_fields(offsets));
	/* Cache the record and index pointers so that a later
	rec_offs_validate() on these offsets will accept them. */
	offsets[2] = (ulint) rec;
	offsets[3] = (ulint) index;
}
#endif /* UNIV_DEBUG */
/************************************************************//**
The following function is used to get an offset to the nth
data field in a record.
......
......@@ -662,7 +662,7 @@ page_cur_search_with_match_bytes(
/* Perform binary search until the lower and upper limit directory
slots come to the distance 1 of each other */
ut_d(bool is_leaf = page_is_leaf(page));
const bool is_leaf = page_is_leaf(page);
while (up - low > 1) {
mid = (low + up) / 2;
......@@ -860,7 +860,7 @@ page_cur_insert_rec_write_log(
ut_ad(!page_rec_is_comp(insert_rec)
== !dict_table_is_comp(index->table));
ut_d(const bool is_leaf = page_rec_is_leaf(cursor_rec));
const bool is_leaf = page_rec_is_leaf(cursor_rec);
{
mem_heap_t* heap = NULL;
......@@ -1134,7 +1134,7 @@ page_cur_parse_insert_rec(
/* Read from the log the inserted index record end segment which
differs from the cursor record */
ut_d(bool is_leaf = page_is_leaf(page));
const bool is_leaf = page_is_leaf(page);
offsets = rec_get_offsets(cursor_rec, index, offsets, is_leaf,
ULINT_UNDEFINED, &heap);
......@@ -1171,9 +1171,15 @@ page_cur_parse_insert_rec(
ut_memcpy(buf + mismatch_index, ptr, end_seg_len);
if (page_is_comp(page)) {
/* Make rec_get_offsets() and rec_offs_make_valid() happy. */
ut_d(rec_set_heap_no_new(buf + origin_offset,
PAGE_HEAP_NO_USER_LOW));
rec_set_info_and_status_bits(buf + origin_offset,
info_and_status_bits);
} else {
/* Make rec_get_offsets() and rec_offs_make_valid() happy. */
ut_d(rec_set_heap_no_old(buf + origin_offset,
PAGE_HEAP_NO_USER_LOW));
rec_set_info_bits_old(buf + origin_offset,
info_and_status_bits);
}
......@@ -1312,7 +1318,7 @@ page_cur_insert_rec_low(
/* 3. Create the record */
insert_rec = rec_copy(insert_buf, rec, offsets);
rec_offs_make_valid(insert_rec, index, offsets);
rec_offs_make_valid(insert_rec, index, page_is_leaf(page), offsets);
/* This is because assertion below is debug assertion */
#ifdef UNIV_DEBUG
......@@ -1602,7 +1608,8 @@ page_cur_insert_rec_zip(
/* This should be followed by
MLOG_ZIP_PAGE_COMPRESS_NO_DATA,
which should succeed. */
rec_offs_make_valid(insert_rec, index, offsets);
rec_offs_make_valid(insert_rec, index,
page_is_leaf(page), offsets);
} else {
ulint pos = page_rec_get_n_recs_before(insert_rec);
ut_ad(pos > 0);
......@@ -1618,7 +1625,8 @@ page_cur_insert_rec_zip(
level, page, index, mtr);
rec_offs_make_valid(
insert_rec, index, offsets);
insert_rec, index,
page_is_leaf(page), offsets);
return(insert_rec);
}
......@@ -1661,7 +1669,8 @@ page_cur_insert_rec_zip(
insert_rec = page + rec_get_next_offs(
cursor->rec, TRUE);
rec_offs_make_valid(
insert_rec, index, offsets);
insert_rec, index,
page_is_leaf(page), offsets);
return(insert_rec);
}
......@@ -1803,7 +1812,7 @@ page_cur_insert_rec_zip(
/* 3. Create the record */
insert_rec = rec_copy(insert_buf, rec, offsets);
rec_offs_make_valid(insert_rec, index, offsets);
rec_offs_make_valid(insert_rec, index, page_is_leaf(page), offsets);
/* 4. Insert the record in the linked list of records */
ut_ad(cursor->rec != insert_rec);
......@@ -2075,7 +2084,7 @@ page_copy_rec_list_end_to_created_page(
slot_index = 0;
n_recs = 0;
ut_d(const bool is_leaf = page_is_leaf(new_page));
const bool is_leaf = page_is_leaf(new_page);
do {
offsets = rec_get_offsets(rec, index, offsets, is_leaf,
......@@ -2120,7 +2129,7 @@ page_copy_rec_list_end_to_created_page(
heap_top += rec_size;
rec_offs_make_valid(insert_rec, index, offsets);
rec_offs_make_valid(insert_rec, index, is_leaf, offsets);
page_cur_insert_rec_write_log(insert_rec, rec_size, prev_rec,
index, mtr);
prev_rec = insert_rec;
......
......@@ -599,7 +599,7 @@ page_copy_rec_list_end_no_locks(
ut_a(page_is_comp(new_page) == page_rec_is_comp(rec));
ut_a(mach_read_from_2(new_page + UNIV_PAGE_SIZE - 10) == (ulint)
(page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM));
ut_d(const bool is_leaf = page_is_leaf(block->frame));
const bool is_leaf = page_is_leaf(block->frame);
cur2 = page_get_infimum_rec(buf_block_get_frame(new_block));
......@@ -1107,7 +1107,7 @@ page_delete_rec_list_end(
? MLOG_COMP_LIST_END_DELETE
: MLOG_LIST_END_DELETE, mtr);
ut_d(const bool is_leaf = page_is_leaf(page));
const bool is_leaf = page_is_leaf(page);
if (page_zip) {
mtr_log_t log_mode;
......@@ -1298,7 +1298,7 @@ page_delete_rec_list_start(
/* Individual deletes are not logged */
mtr_log_t log_mode = mtr_set_log_mode(mtr, MTR_LOG_NONE);
ut_d(const bool is_leaf = page_rec_is_leaf(rec));
const bool is_leaf = page_rec_is_leaf(rec);
while (page_cur_get_rec(&cur1) != rec) {
offsets = rec_get_offsets(page_cur_get_rec(&cur1), index,
......
......@@ -2174,7 +2174,7 @@ page_zip_apply_log(
rec_get_offsets_reverse(data, index,
hs & REC_STATUS_NODE_PTR,
offsets);
rec_offs_make_valid(rec, index, offsets);
rec_offs_make_valid(rec, index, is_leaf, offsets);
/* Copy the extra bytes (backwards). */
{
......
......@@ -1153,10 +1153,9 @@ cmp_rec_rec_with_match(
/* Test if rec is the predefined minimum record */
if (UNIV_UNLIKELY(rec_get_info_bits(rec1, comp)
& REC_INFO_MIN_REC_FLAG)) {
/* There should only be one such record. */
ut_ad(!(rec_get_info_bits(rec2, comp)
& REC_INFO_MIN_REC_FLAG));
ret = -1;
ret = UNIV_UNLIKELY(rec_get_info_bits(rec2, comp)
& REC_INFO_MIN_REC_FLAG)
? 0 : -1;
goto order_resolved;
} else if (UNIV_UNLIKELY
(rec_get_info_bits(rec2, comp)
......
......@@ -234,24 +234,22 @@ rec_get_n_extern_new(
return(n_extern);
}
/*******************************************************************//**
Get the bit number of nullable bitmap. */
UNIV_INTERN
/** Get the number of nullable columns.
@param[in] rec clustered index record with added columns
@param[in] index clustered index
@return number of possibly NULL columns in rec */
static
ulint
rec_get_n_nullable(
/*=======================*/
const rec_t* rec, /*!< in: gcs_record */
const dict_index_t* index) /*!< in: clustered index */
rec_get_n_nullable(const rec_t* rec, const dict_index_t* index)
{
ulint field_count = 0;
ulint field_count_len = 0;
ut_ad(rec_is_instant(rec) && index->is_instant());
field_count = rec_get_field_count(rec, &field_count_len);
ut_ad(field_count_len == rec_get_field_count_len(field_count));
ut_ad(index->is_instant());
ut_ad(dict_table_is_comp(index->table));
ut_ad(rec_is_instant(rec));
return dict_index_get_first_n_field_n_nullable(index, field_count);
ulint len;
ulint n_fields = rec_get_field_count(rec, &len);
ut_ad(len == rec_get_field_count_len(n_fields));
return dict_index_get_first_n_field_n_nullable(index, n_fields);
}
/******************************************************//**
......@@ -412,9 +410,111 @@ rec_init_offsets_comp_ordinary(
= (rec - (lens + 1)) | REC_OFFS_COMPACT | any;
}
/******************************************************//**
The following function determines the offsets to each field in the
record. The offsets are written to a previously allocated array of
#ifdef UNIV_DEBUG
/** Update debug data in offsets, in order to tame rec_offs_validate().
@param[in]	rec	record
@param[in]	index	the index that the record belongs in
@param[in]	leaf	whether the record resides in a leaf page
@param[in,out]	offsets	offsets from rec_get_offsets() to adjust */
void
rec_offs_make_valid(
	const rec_t*		rec,
	const dict_index_t*	index,
	bool			leaf,
	ulint*			offsets)
{
	/* Leaf records may carry up to all index fields; non-leaf
	(node pointer) records carry only the unique-in-tree prefix
	plus one more field. Dummy and insert-buffer indexes are
	exempt from this bound. */
	ut_ad(rec_offs_n_fields(offsets)
	      <= (leaf
		  ? dict_index_get_n_fields(index)
		  : dict_index_get_n_unique_in_tree_nonleaf(index) + 1)
	      || index->is_dummy || dict_index_is_ibuf(index));
	/* Heap numbers below PAGE_HEAP_NO_USER_LOW identify the page
	infimum and supremum pseudo-records. */
	const bool is_user_rec = (dict_table_is_comp(index->table)
				  ? rec_get_heap_no_new(rec)
				  : rec_get_heap_no_old(rec))
		>= PAGE_HEAP_NO_USER_LOW;
	ulint n = rec_get_n_fields(rec, index);
	/* The infimum and supremum records carry 1 field. */
	ut_ad(is_user_rec || n == 1);
	ut_ad(is_user_rec || rec_offs_n_fields(offsets) == 1);
	ut_ad(!is_user_rec || n >= index->n_core_fields
	      || n >= rec_offs_n_fields(offsets));
	/* Any offsets entries beyond the fields physically present in
	the record must be instant-ADD-COLUMN default values, which can
	only occur on leaf pages. */
	for (; n < rec_offs_n_fields(offsets); n++) {
		ut_ad(leaf);
		ut_ad(rec_offs_base(offsets)[1 + n] & REC_OFFS_DEFAULT);
	}
	/* Cache the record and index pointers so that a later
	rec_offs_validate() on these offsets will accept them. */
	offsets[2] = ulint(rec);
	offsets[3] = ulint(index);
}
/** Validate offsets returned by rec_get_offsets().
@param[in]	rec	record, or NULL
@param[in]	index	the index that the record belongs in, or NULL
@param[in,out]	offsets	the offsets of the record
@return true */
bool
rec_offs_validate(
	const rec_t*		rec,
	const dict_index_t*	index,
	const ulint*		offsets)
{
	ulint	i	= rec_offs_n_fields(offsets);
	ulint	last	= ULINT_MAX;
	/* REC_OFFS_COMPACT in the extra-size word records whether the
	offsets were computed for a ROW_FORMAT=COMPACT family record. */
	ulint	comp	= *rec_offs_base(offsets) & REC_OFFS_COMPACT;

	if (rec) {
		/* offsets[2] caches the record pointer these offsets
		were computed for (set by rec_offs_make_valid() or
		rec_init_offsets()). */
		ut_ad(ulint(rec) == offsets[2]);
		if (!comp) {
			/* Heap numbers below PAGE_HEAP_NO_USER_LOW
			identify the infimum and supremum records. */
			const bool is_user_rec = rec_get_heap_no_old(rec)
				>= PAGE_HEAP_NO_USER_LOW;
			ulint n = rec_get_n_fields_old(rec);
			/* The infimum and supremum records carry 1 field. */
			ut_ad(is_user_rec || n == 1);
			ut_ad(is_user_rec || i == 1);
			ut_ad(!is_user_rec || n >= i
			      || n >= index->n_core_fields);
			/* Any fields beyond those physically stored in
			the record must be flagged as instant ADD COLUMN
			default values. */
			for (; n < i; n++) {
				ut_ad(rec_offs_base(offsets)[1 + n]
				      & REC_OFFS_DEFAULT);
			}
		}
	}
	if (index) {
		ulint	max_n_fields;
		/* offsets[3] caches the index pointer (debug field). */
		ut_ad(ulint(index) == offsets[3]);
		max_n_fields = ut_max(
			dict_index_get_n_fields(index),
			dict_index_get_n_unique_in_tree(index) + 1);
		if (comp && rec) {
			/* For COMPACT records, tighten the bound based
			on the record type encoded in the status bits. */
			switch (rec_get_status(rec)) {
			case REC_STATUS_ORDINARY:
				break;
			case REC_STATUS_NODE_PTR:
				/* Node pointers carry the unique prefix
				plus the child page number field. */
				max_n_fields = dict_index_get_n_unique_in_tree(
					index) + 1;
				break;
			case REC_STATUS_INFIMUM:
			case REC_STATUS_SUPREMUM:
				/* Pseudo-records carry a single field. */
				max_n_fields = 1;
				break;
			default:
				ut_error;
			}
		}
		/* index->n_def == 0 for dummy indexes if !comp */
		ut_a(!comp || index->n_def);
		ut_a(!index->n_def || i <= max_n_fields);
	}
	/* End offsets must be non-decreasing in field order; scan
	backwards and check each field ends at or before its successor. */
	while (i--) {
		ulint	curr = rec_offs_base(offsets)[1 + i] & REC_OFFS_MASK;
		ut_a(curr <= last);
		last = curr;
	}
	/* Return the bool literal, consistent with the ibool -> bool
	modernization of this function's signature. */
	return(true);
}
#endif /* UNIV_DEBUG */
/** Determine the offsets to each field in the record.
The offsets are written to a previously allocated array of
ulint, where rec_offs_n_fields(offsets) has been initialized to the
number of fields in the record. The rest of the array will be
initialized by this function. rec_offs_base(offsets)[0] will be set
......@@ -425,21 +525,25 @@ offsets past the end of fields 0..n_fields, or to the beginning of
fields 1..n_fields+1. When the high-order bit of the offset at [i+1]
is set (REC_OFFS_SQL_NULL), the field i is NULL. When the second
high-order bit of the offset at [i+1] is set (REC_OFFS_EXTERNAL), the
field i is being stored externally. */
field i is being stored externally.
@param[in] rec record
@param[in] index the index that the record belongs in
@param[in] leaf whether the record resides in a leaf page
@param[in,out] offsets array of offsets, with valid rec_offs_n_fields() */
static
void
rec_init_offsets(
/*=============*/
const rec_t* rec, /*!< in: physical record */
const dict_index_t* index, /*!< in: record descriptor */
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
const rec_t* rec,
const dict_index_t* index,
bool leaf,
ulint* offsets)
{
ulint i = 0;
ulint offs;
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
rec_offs_make_valid(rec, index, offsets);
ut_d(offsets[2] = ulint(rec));
ut_d(offsets[3] = ulint(index));
if (dict_table_is_comp(index->table)) {
const byte* nulls;
......@@ -458,25 +562,26 @@ rec_init_offsets(
rec_offs_base(offsets)[1] = 8;
return;
case REC_STATUS_NODE_PTR:
ut_ad(!leaf);
n_node_ptr_field
= dict_index_get_n_unique_in_tree_nonleaf(
index);
break;
case REC_STATUS_ORDINARY:
ut_ad(leaf);
rec_init_offsets_comp_ordinary(
rec, false, index, offsets);
return;
}
/** The n_nullable flags in the clustered index node pointer
records in ROW_FORMAT=COMPACT or ROW_FORMAT=DYNAMIC must
reflect the number of 'core columns'. These flags are
useless garbage, and they are only reserved because of
file format compatibility.
(Clustered index node pointer records only contain the
PRIMARY KEY columns, which are always NOT NULL,
so we should have used n_nullable=0.)
*/
/* The n_nullable flags in the clustered index node pointer
records in ROW_FORMAT=COMPACT or ROW_FORMAT=DYNAMIC must
reflect the number of 'core columns'. These flags are
useless garbage, and they are only reserved because of
file format compatibility.
(Clustered index node pointer records only contain the
PRIMARY KEY columns, which are always NOT NULL,
so we should have used n_nullable=0.) */
ut_ad(!rec_is_instant(rec));
ut_ad(index->n_core_fields > 0);
......@@ -561,9 +666,13 @@ rec_init_offsets(
} else {
/* Old-style record: determine extra size and end offsets */
offs = REC_N_OLD_EXTRA_BYTES;
const ulint n_fields = rec_get_n_fields_old(rec);
const ulint n = std::min(n_fields, rec_offs_n_fields(offsets));
ulint any;
if (rec_get_1byte_offs_flag(rec)) {
offs += rec_offs_n_fields(offsets);
*rec_offs_base(offsets) = offs;
offs += n_fields;
any = offs;
/* Determine offsets to fields */
do {
offs = rec_1_get_field_end_info(rec, i);
......@@ -572,10 +681,10 @@ rec_init_offsets(
offs |= REC_OFFS_SQL_NULL;
}
rec_offs_base(offsets)[1 + i] = offs;
} while (++i < rec_offs_n_fields(offsets));
} while (++i < n);
} else {
offs += 2 * rec_offs_n_fields(offsets);
*rec_offs_base(offsets) = offs;
offs += 2 * n_fields;
any = offs;
/* Determine offsets to fields */
do {
offs = rec_2_get_field_end_info(rec, i);
......@@ -586,11 +695,23 @@ rec_init_offsets(
if (offs & REC_2BYTE_EXTERN_MASK) {
offs &= ~REC_2BYTE_EXTERN_MASK;
offs |= REC_OFFS_EXTERNAL;
*rec_offs_base(offsets) |= REC_OFFS_EXTERNAL;
any |= REC_OFFS_EXTERNAL;
}
rec_offs_base(offsets)[1 + i] = offs;
} while (++i < n);
}
if (i < rec_offs_n_fields(offsets)) {
offs = rec_offs_base(offsets)[i] | REC_OFFS_DEFAULT;
do {
rec_offs_base(offsets)[1 + i] = offs;
} while (++i < rec_offs_n_fields(offsets));
any |= REC_OFFS_DEFAULT;
}
*rec_offs_base(offsets) = any;
}
}
......@@ -609,9 +730,7 @@ rec_get_offsets_func(
const rec_t* rec,
const dict_index_t* index,
ulint* offsets,
#ifdef UNIV_DEBUG
bool leaf,
#endif /* UNIV_DEBUG */
ulint n_fields,
#ifdef UNIV_DEBUG
const char* file, /*!< in: file name where called */
......@@ -661,10 +780,8 @@ rec_get_offsets_func(
page_rec_is_user_rec(rec) and similar predicates
cannot be evaluated. We can still distinguish the
infimum and supremum record based on the heap number. */
ut_d(const bool is_user_rec = rec_get_heap_no_old(rec)
>= PAGE_HEAP_NO_USER_LOW);
ut_ad(n <= ulint(index->n_fields + !leaf) || index->is_dummy
|| dict_index_is_ibuf(index));
const bool is_user_rec = rec_get_heap_no_old(rec)
>= PAGE_HEAP_NO_USER_LOW;
/* The infimum and supremum records carry 1 field. */
ut_ad(is_user_rec || n == 1);
ut_ad(!is_user_rec || leaf || index->is_dummy
......@@ -674,9 +791,13 @@ rec_get_offsets_func(
ut_ad(!is_user_rec || !leaf || index->is_dummy
|| dict_index_is_ibuf(index)
|| n == n_fields /* btr_pcur_restore_position() */
|| n == index->n_fields
|| (index->id == DICT_INDEXES_ID
&& (n == DICT_NUM_FIELDS__SYS_INDEXES - 1)));
|| (n >= index->n_core_fields && n <= index->n_fields));
if (is_user_rec && leaf && n < index->n_fields) {
ut_ad(!index->is_dummy);
ut_ad(!dict_index_is_ibuf(index));
n = index->n_fields;
}
}
if (UNIV_UNLIKELY(n_fields < n)) {
......@@ -700,7 +821,7 @@ rec_get_offsets_func(
}
rec_offs_set_n_fields(offsets, n);
rec_init_offsets(rec, index, offsets);
rec_init_offsets(rec, index, leaf, offsets);
return(offsets);
}
......@@ -1251,17 +1372,14 @@ rec_convert_dtuple_to_rec_old(
/* Calculate the offset of the origin in the physical record */
rec = buf + rec_get_converted_extra_size(data_size, n_fields, n_ext);
#ifdef UNIV_DEBUG
/* Suppress Valgrind warnings of ut_ad()
in mach_write_to_1(), mach_write_to_2() et al. */
memset(buf, 0xff, rec - buf + data_size);
#endif /* UNIV_DEBUG */
/* Store the number of fields */
rec_set_n_fields_old(rec, n_fields);
/* Set the info bits of the record */
rec_set_info_bits_old(rec, dtuple_get_info_bits(dtuple)
& REC_INFO_BITS_MASK);
/* Make rec_get_offsets() and rec_offs_make_valid() happy. */
ut_d(rec_set_heap_no_old(rec, PAGE_HEAP_NO_USER_LOW));
/* Store the data and the offsets */
......@@ -1335,10 +1453,9 @@ rec_convert_dtuple_to_rec_old(
/*********************************************************//**
Builds a ROW_FORMAT=COMPACT record out of a data tuple.
@return TRUE if instant record.
*/
@return true if instant record */
UNIV_INLINE
ibool
bool
rec_convert_dtuple_to_rec_comp(
/*===========================*/
rec_t* rec, /*!< in: origin of record */
......@@ -1380,6 +1497,8 @@ rec_convert_dtuple_to_rec_comp(
temp = false;
}
} else {
/* Make rec_get_offsets() and rec_offs_make_valid() happy. */
ut_d(rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW));
nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
......@@ -1698,13 +1817,11 @@ The fields are copied into the memory heap.
@param[in] n_fields number of fields to copy
@param[in,out] heap memory heap */
void
rec_copy_prefix_to_dtuple_func(
rec_copy_prefix_to_dtuple(
dtuple_t* tuple,
const rec_t* rec,
const dict_index_t* index,
#ifdef UNIV_DEBUG
bool is_leaf,
#endif /* UNIV_DEBUG */
ulint n_fields,
mem_heap_t* heap)
{
......@@ -1712,7 +1829,8 @@ rec_copy_prefix_to_dtuple_func(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
ut_ad(is_leaf || n_fields <= index->n_uniq + 1);
ut_ad(is_leaf || n_fields
<= dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
offsets = rec_get_offsets(rec, index, offsets, is_leaf,
n_fields, &heap);
......@@ -1739,6 +1857,8 @@ rec_copy_prefix_to_dtuple_func(
dfield_set_null(field);
}
}
ut_ad(!rec_offs_any_default(offsets));
}
/**************************************************************//**
......@@ -1854,7 +1974,7 @@ rec_copy_prefix_to_buf(
field_count = rec_get_field_count(rec, &field_count_len);
ut_ad(field_count_len == rec_get_field_count_len(field_count));
ut_ad(field_count >= n_fields);
ut_ad(field_count >= n_fields);
n_nullable = rec_get_n_nullable(rec, index);
ut_ad(n_nullable <= index->n_nullable);
......
......@@ -436,7 +436,7 @@ row_build_low(
}
/* Avoid a debug assertion in rec_offs_validate(). */
rec_offs_make_valid(copy, index, const_cast<ulint*>(offsets));
rec_offs_make_valid(copy, index, true, const_cast<ulint*>(offsets));
if (!col_table) {
ut_ad(!col_map);
......@@ -526,7 +526,7 @@ row_build_low(
}
}
rec_offs_make_valid(rec, index, const_cast<ulint*>(offsets));
rec_offs_make_valid(rec, index, true, const_cast<ulint*>(offsets));
ut_ad(dtuple_check_typed(row));
......@@ -738,10 +738,12 @@ row_rec_to_index_entry(
copy_rec = rec_copy(buf, rec, offsets);
rec_offs_make_valid(copy_rec, index, const_cast<ulint*>(offsets));
rec_offs_make_valid(copy_rec, index, true,
const_cast<ulint*>(offsets));
entry = row_rec_to_index_entry_low(
copy_rec, index, offsets, n_ext, heap);
rec_offs_make_valid(rec, index, const_cast<ulint*>(offsets));
rec_offs_make_valid(rec, index, true,
const_cast<ulint*>(offsets));
dtuple_set_info_bits(entry,
rec_get_info_bits(rec, rec_offs_comp(offsets)));
......@@ -804,8 +806,7 @@ row_build_row_ref(
mem_heap_alloc(heap, rec_offs_size(offsets)));
rec = rec_copy(buf, rec, offsets);
/* Avoid a debug assertion in rec_offs_validate(). */
rec_offs_make_valid(rec, index, offsets);
rec_offs_make_valid(rec, index, true, offsets);
}
table = index->table;
......
......@@ -1210,7 +1210,7 @@ row_vers_build_for_consistent_read(
in_heap, rec_offs_size(*offsets)));
*old_vers = rec_copy(buf, prev_version, *offsets);
rec_offs_make_valid(*old_vers, index, *offsets);
rec_offs_make_valid(*old_vers, index, true, *offsets);
if (vrow && *vrow) {
*vrow = dtuple_copy(*vrow, in_heap);
......@@ -1337,7 +1337,7 @@ row_vers_build_for_semi_consistent_read(
in_heap, rec_offs_size(*offsets)));
*old_vers = rec_copy(buf, version, *offsets);
rec_offs_make_valid(*old_vers, index, *offsets);
rec_offs_make_valid(*old_vers, index, true, *offsets);
if (vrow && *vrow) {
*vrow = dtuple_copy(*vrow, in_heap);
dtuple_dup_v_fld(*vrow, in_heap);
......
......@@ -2307,7 +2307,7 @@ trx_undo_prev_version_build(
heap, rec_offs_size(offsets)));
*old_vers = rec_copy(buf, rec, offsets);
rec_offs_make_valid(*old_vers, index, offsets);
rec_offs_make_valid(*old_vers, index, true, offsets);
row_upd_rec_in_place(*old_vers, index, offsets, update, NULL);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment