MDEV-27318 Assertion `data_size < srv_sort_buf_size' failed in row_merge_bulk_buf_add

InnoDB fails to add a tuple whose data size is greater than
innodb_sort_buffer_size. InnoDB should write fields that are
larger than 2000 bytes into a temporary file, store their offset
and length in the tuple field, and build a new tuple from them.
InnoDB can then buffer the newly created tuple without any
problem during bulk insert.
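
In outline, a field longer than 2000 bytes is written to a blob
temporary file and replaced inside the tuple by a small
(offset, length) reference before the tuple is buffered. Below is a
minimal, self-contained sketch of that idea; Field, BlobRef and
offload_large_field are simplified stand-ins for illustration, not
the actual InnoDB types or API:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    /* Simplified stand-ins for the structures involved (illustrative only). */
    struct Field   { std::vector<std::uint8_t> data; };
    struct BlobRef { std::uint64_t offset; std::uint32_t len; };

    /* Write an oversized field to the blob temporary file and replace its
    in-memory payload with a small (offset, length) reference, so that the
    rebuilt tuple fits in the sort buffer. Fields of 2000 bytes or less
    are kept inline. */
    static bool offload_large_field(Field &f, std::FILE *blob_file,
                                    std::uint64_t &file_offset)
    {
      if (f.data.size() <= 2000)
        return true;                      /* small enough, keep inline */
      if (std::fwrite(f.data.data(), 1, f.data.size(), blob_file)
          != f.data.size())
        return false;                     /* I/O error */
      BlobRef ref{file_offset, static_cast<std::uint32_t>(f.data.size())};
      file_offset+= f.data.size();
      /* Replace the field payload with the fixed-size reference. */
      const std::uint8_t *p= reinterpret_cast<const std::uint8_t*>(&ref);
      f.data.assign(p, p + sizeof ref);
      return true;
    }

In the actual patch this role is played by
row_merge_write_blob_to_tmp_file() and row_merge_buf_large_tuple()
for tuples that would otherwise exceed srv_sort_buf_size.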
parent a23f3ee8
@@ -231,3 +231,12 @@ SELECT COUNT(*) FROM t WHERE MBRWithin(t.c, POINT(1,1));
COUNT(*)
1
DROP TABLE t;
#
# MDEV-27318 Assertion data_size < srv_sort_buf_size failed in row_merge_bulk_buf_add
#
CREATE TABLE t1(f1 MEDIUMTEXT)ENGINE=InnoDB;
INSERT INTO t1 VALUES(REPEAT(1, 8459264));
SELECT length(f1) FROM t1;
length(f1)
8459264
DROP TABLE t1;
@@ -242,3 +242,11 @@ CREATE TABLE t (c POINT NOT NULL, SPATIAL INDEX(c)) ENGINE=InnoDB;
INSERT INTO t VALUES (POINT(1, 1));
SELECT COUNT(*) FROM t WHERE MBRWithin(t.c, POINT(1,1));
DROP TABLE t;
--echo #
--echo # MDEV-27318 Assertion data_size < srv_sort_buf_size failed in row_merge_bulk_buf_add
--echo #
CREATE TABLE t1(f1 MEDIUMTEXT)ENGINE=InnoDB;
INSERT INTO t1 VALUES(REPEAT(1, 8459264));
SELECT length(f1) FROM t1;
DROP TABLE t1;
/*****************************************************************************
Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2021, MariaDB Corporation.
Copyright (c) 2014, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -503,8 +503,6 @@ static ulint row_merge_bulk_buf_add(row_merge_buf_t* buf,
of extra_size. */
data_size += (extra_size + 1) + ((extra_size + 1) >= 0x80);
ut_ad(data_size < srv_sort_buf_size);
/* Reserve bytes for the end marker of row_merge_block_t. */
if (buf->total_size + data_size >= srv_sort_buf_size)
return 0;
@@ -1045,30 +1043,22 @@ row_merge_buf_sort(
buf->tuples, buf->tmp_tuples, 0, buf->n_tuples);
}
/** Write the data of fields longer than 2000 bytes into the blob
temporary file and store the offset and length in the tuple field
@param entry index fields whose blob data is to be encoded
@param n_fields number of fields in the entry
@param heap heap to store the blob offset and blob length
@param blob_file file to store the blob data */
static dberr_t row_merge_buf_blob(const mtuple_t *entry, ulint n_fields,
mem_heap_t **heap, merge_file_t *blob_file)
/** Write the blob field data to the temporary file and store the
offset and length in the field data
@param field tuple field
@param blob_file file to store the blob data
@param heap heap to store the blob offset and length
@return DB_SUCCESS if successful */
static dberr_t row_merge_write_blob_to_tmp_file(
dfield_t *field, merge_file_t *blob_file, mem_heap_t **heap)
{
if (!*heap)
*heap= mem_heap_create(100);
for (ulint i= 0; i < n_fields; i++)
{
if (dfield_is_null(&entry->fields[i]) || entry->fields[i].len <= 2000)
continue;
if (blob_file->fd == OS_FILE_CLOSED)
{
blob_file->fd= row_merge_file_create_low(nullptr);
if (blob_file->fd == OS_FILE_CLOSED)
return DB_OUT_OF_MEMORY;
}
uint64_t val= blob_file->offset;
dfield_t *field= &entry->fields[i];
uint32_t len= field->len;
dberr_t err= os_file_write(
IORequestWrite, "(bulk insert)", blob_file->fd,
@@ -1090,6 +1080,62 @@ static dberr_t row_merge_buf_blob(const mtuple_t *entry, ulint n_fields,
blob_file->n_rec++;
dfield_set_data(field, data, BTR_EXTERN_FIELD_REF_SIZE);
dfield_set_ext(field);
return err;
}
/** This function is invoked when the tuple size is greater than
innodb_sort_buffer_size. It recreates the tuple by writing
its blob fields to the temporary file.
@param entry index fields whose blob data is to be encoded
@param blob_file file to store the blob data
@param heap heap to store the blob offset and blob length
@return tuple which fits within the sort buffer */
static dtuple_t* row_merge_buf_large_tuple(const dtuple_t &entry,
merge_file_t *blob_file,
mem_heap_t **heap)
{
if (!*heap)
*heap= mem_heap_create(DTUPLE_EST_ALLOC(entry.n_fields));
dtuple_t *tuple= dtuple_copy(&entry, *heap);
for (ulint i= 0; i < tuple->n_fields; i++)
{
dfield_t *field= &tuple->fields[i];
if (dfield_is_null(field) || field->len <= 2000)
continue;
dberr_t err= row_merge_write_blob_to_tmp_file(field, blob_file, heap);
if (err != DB_SUCCESS)
return nullptr;
}
return tuple;
}
/** Write the data of fields longer than 2000 bytes into the blob
temporary file and store the offset and length in the tuple field
@param entry index fields whose blob data is to be encoded
@param n_fields number of fields in the entry
@param heap heap to store the blob offset and blob length
@param blob_file file to store the blob data */
static dberr_t row_merge_buf_blob(const mtuple_t *entry, ulint n_fields,
mem_heap_t **heap, merge_file_t *blob_file)
{
if (!*heap)
*heap= mem_heap_create(100);
for (ulint i= 0; i < n_fields; i++)
{
dfield_t *field= &entry->fields[i];
if (dfield_is_null(field) || field->len <= 2000)
continue;
dberr_t err= row_merge_write_blob_to_tmp_file(field, blob_file, heap);
if (err != DB_SUCCESS)
return err;
}
return DB_SUCCESS;
@@ -5109,6 +5155,7 @@ dberr_t row_merge_bulk_t::bulk_insert_buffered(const dtuple_t &row,
{
dberr_t err= DB_SUCCESS;
ulint i= 0;
mem_heap_t *large_tuple_heap= nullptr;
for (dict_index_t *index= UT_LIST_GET_FIRST(ind.table->indexes);
index; index= UT_LIST_GET_NEXT(indexes, index))
{
@@ -5125,7 +5172,19 @@ dberr_t row_merge_bulk_t::bulk_insert_buffered(const dtuple_t &row,
if (row_merge_bulk_buf_add(buf, *ind.table, row))
{
i++;
return err;
goto func_exit;
}
if (buf->n_tuples == 0)
{
/* Tuple data size is greater than srv_sort_buf_size */
dtuple_t *big_tuple= row_merge_buf_large_tuple(
row, &m_blob_file, &large_tuple_heap);
if (row_merge_bulk_buf_add(buf, *ind.table, *big_tuple))
{
i++;
goto func_exit;
}
}
if (index->is_unique())
@@ -5148,6 +5207,9 @@ dberr_t row_merge_bulk_t::bulk_insert_buffered(const dtuple_t &row,
goto add_to_buf;
}
func_exit:
if (large_tuple_heap)
mem_heap_free(large_tuple_heap);
return err;
}
@@ -5183,7 +5245,8 @@ dberr_t row_merge_bulk_t::write_to_index(ulint index_no, trx_t *trx)
/* Data fits in the merge buffer. */
err= row_merge_insert_index_tuples(
index, table, OS_FILE_CLOSED, nullptr,
&buf, &btr_bulk, 0, 0, 0, nullptr, table->space_id);
&buf, &btr_bulk, 0, 0, 0, nullptr, table->space_id, nullptr,
m_blob_file.fd == OS_FILE_CLOSED ? nullptr : &m_blob_file);
goto func_exit;
}
}