Commit f5c820cd authored by unknown

Fix for BUG#34114 "maria_chk reports false error when several tables on
command-line" and BUG#34062 "Maria table corruption on master".
Use 5 bytes (instead of 4) to store a page's number in the checkpoint
record, to allow bigger tables (1PB with maria-block-size=1kB).
Help pushbuild not run out of memory by moving the portion of
maria-recovery.test that generates lots of data into a -big.test.
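(Arithmetic: a 5-byte page number addresses 2^40 pages; with
maria-block-size=1kB that is 2^40 * 2^10 = 2^50 bytes, i.e. 1PB per table,
versus 2^32 * 1kB = 4TB with the old 4-byte field.)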


mysql-test/r/maria-recovery.result:
  result moved
mysql-test/t/maria-recovery.test:
  piece which generates a lot of data moved to maria-recovery-big.test
mysys/my_pread.c:
  To fix BUG#34062, where a 1.1TB file was generated due to a wrong
  pwrite offset, it was useful not to lose precision on 'offset' in
  DBUG_PRINT, so that the crazy value is visible.
mysys/my_read.c:
  Same motivation as for my_pread.c (BUG#34062): don't lose precision on
  sizes ('Count') in DBUG_PRINT, so that crazy values are visible.
mysys/my_write.c:
  Same motivation as for my_pread.c (BUG#34062): don't lose precision on
  sizes ('Count') in DBUG_PRINT, so that crazy values are visible.
storage/maria/ha_maria.cc:
  When starting a bulk insert, we throw away dirty index pages from the
  cache. Unique (non-disabled) key insertions thus read out-of-date pages
  from disk, leading to BUG#34062 "Maria table corruption on master": a
  DELETE in procedure viewer_sp() had deleted all rows of viewer_tbl2 one
  by one, putting index page 1 into key_del; that page was thrown away at
  the start of the INSERT SELECT. The INSERT SELECT then needed a page to
  insert keys into, looked at key_del, found 1, read page 1 from disk, and
  used its out-of-date content to set the new value of key_del (a crazy
  value of 1TB). A later insertion needed another index page, tried to
  read the page at this crazy offset, and failed, marking the table
  corrupted.
  The fix is to destroy the out-of-date pages and make the state
  consistent with that, i.e. call maria_delete_all_rows().
storage/maria/ma_blockrec.c:
  Special hook for UNDO_BULK_INSERT
storage/maria/ma_blockrec.h:
  special hook for UNDO_BULK_INSERT
storage/maria/ma_check.c:
  Fix for BUG#34114 "maria_chk reports false error when several tables on
  command-line": if the Nth table on the command line was BLOCK_RECORD, it
  would start its checks with the param->record_checksum left over from
  checking table N-1.
storage/maria/ma_delete_all.c:
  comment
storage/maria/ma_loghandler.c:
  special hook for UNDO_BULK_INSERT
storage/maria/ma_page.c:
  comment
storage/maria/ma_pagecache.c:
  page number is 5 bytes in checkpoint record now (allows bigger tables)
storage/maria/ma_recovery.c:
  page number is 5 bytes in checkpoint record now
storage/maria/ma_recovery_util.c:
  page number is 5 bytes now
storage/maria/ma_write.c:
  typo in DBUG_ENTER name (maria_split_page -> _ma_split_page)
mysql-test/r/maria-recovery-big.result:
  result is correct
mysql-test/t/maria-recovery-big-master.opt:
  usual options for recovery tests
mysql-test/t/maria-recovery-big.test:
  Moving out the big blob test to a -big test (it exhausts memory when
  using /dev/shm on certain machines)
parent 8863bf88
set global maria_log_file_size=4294967295;
drop database if exists mysqltest;
create database mysqltest;
use mysqltest;
* TEST of recovery with blobs
* shut down mysqld, removed logs, restarted it
use mysqltest;
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
* copied t1 for feeding_recovery
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
flush table t1;
* copied t1 for comparison
lock table t1 write;
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
select a,length(b) from t1;
a length(b)
1 31457280
2 20971520
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
select a,length(b) from t1;
a length(b)
1 8
2 5
SET SESSION debug="+d,maria_flush_whole_log,maria_crash";
* crashing mysqld intentionally
set global maria_checkpoint_interval=1;
ERROR HY000: Lost connection to MySQL server during query
* copied t1 back for feeding_recovery
* recovery happens
check table t1 extended;
Table Op Msg_type Msg_text
mysqltest.t1 check status OK
* testing that checksum after recovery is as expected
Checksum-check
ok
use mysqltest;
drop table t1;
drop database mysqltest_for_feeding_recovery;
drop database mysqltest_for_comparison;
drop database mysqltest;
@@ -302,80 +302,6 @@ a
1
3
drop table t1;
* TEST of recovery with blobs
* shut down mysqld, removed logs, restarted it
use mysqltest;
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
* copied t1 for feeding_recovery
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
flush table t1;
* copied t1 for comparison
lock table t1 write;
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
select a,length(b) from t1;
a length(b)
1 31457280
2 20971520
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
select a,length(b) from t1;
a length(b)
1 8
2 5
SET SESSION debug="+d,maria_flush_whole_log,maria_crash";
* crashing mysqld intentionally
set global maria_checkpoint_interval=1;
ERROR HY000: Lost connection to MySQL server during query
* copied t1 back for feeding_recovery
* recovery happens
check table t1 extended;
Table Op Msg_type Msg_text
mysqltest.t1 check status OK
* testing that checksum after recovery is as expected
Checksum-check
ok
use mysqltest;
drop table t1;
* TEST of recovery when crash before bulk-insert-with-repair is committed
create table t1 (a varchar(100), key(a)) engine=maria;
create table t2 (a varchar(100)) engine=myisam;
......
--skip-stack-trace --skip-core-file --loose-debug-on=1
# Maria recovery test which cannot run in shared memory
# because it generates too much data, or which takes a lot of time.
--source include/not_embedded.inc
# Don't test this under valgrind, memory leaks will occur as we crash
--source include/not_valgrind.inc
# Binary must be compiled with debug for crash to occur
--source include/have_debug.inc
--source include/have_maria.inc
set global maria_log_file_size=4294967295;
--disable_warnings
drop database if exists mysqltest;
--enable_warnings
create database mysqltest;
# Include scripts can perform SQL. For this not to influence the main test,
# they use a separate connection; this way, if they issue DDL, it does not
# autocommit in the main test.
connect (admin, 127.0.0.1, root,,mysqltest,,);
--enable_reconnect
connection default;
use mysqltest;
--enable_reconnect
#
# Test with big blobs
#
--echo * TEST of recovery with blobs
-- source include/maria_empty_logs.inc
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
let $mms_tables=1;
-- source include/maria_make_snapshot_for_feeding_recovery.inc
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
-- source include/maria_make_snapshot_for_comparison.inc
lock table t1 write;
let $loop=20;
while ($loop)
{
update t1 set b=CONCAT(b,b);
dec $loop;
}
select a,length(b) from t1;
let $loop=22;
while ($loop)
{
update t1 set b=mid(b,1,length(b)/2);
dec $loop;
}
select a,length(b) from t1;
# we want recovery to run on the first snapshot made above
let $mvr_restore_old_snapshot=1;
let $mms_compare_physically=0;
let $mvr_debug_option="+d,maria_flush_whole_log,maria_crash";
let $mvr_crash_statement= set global maria_checkpoint_interval=1;
-- source include/maria_verify_recovery.inc
drop table t1;
# clean up everything
let $mms_purpose=feeding_recovery;
eval drop database mysqltest_for_$mms_purpose;
let $mms_purpose=comparison;
eval drop database mysqltest_for_$mms_purpose;
drop database mysqltest;
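(How the crash is injected, per the result log above: maria_verify_recovery.inc
arms the DBUG keywords "+d,maria_flush_whole_log,maria_crash" in the session
and then runs the $mvr_crash_statement "set global maria_checkpoint_interval=1";
the debug-instrumented server flushes the whole log and deliberately crashes on
that statement, which is why the results expect "ERROR HY000: Lost connection
to MySQL server during query".)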
@@ -256,41 +256,6 @@ select * from t1;
select * from t1;
drop table t1;
#
# Test with big blobs
#
--echo * TEST of recovery with blobs
-- source include/maria_empty_logs.inc
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
let $mms_tables=1;
-- source include/maria_make_snapshot_for_feeding_recovery.inc
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
-- source include/maria_make_snapshot_for_comparison.inc
lock table t1 write;
let $loop=20;
while ($loop)
{
update t1 set b=CONCAT(b,b);
dec $loop;
}
select a,length(b) from t1;
let $loop=22;
while ($loop)
{
update t1 set b=mid(b,1,length(b)/2);
dec $loop;
}
select a,length(b) from t1;
# we want recovery to run on the first snapshot made above
let $mvr_restore_old_snapshot=1;
let $mms_compare_physically=0;
let $mvr_debug_option="+d,maria_flush_whole_log,maria_crash";
let $mvr_crash_statement= set global maria_checkpoint_interval=1;
-- source include/maria_verify_recovery.inc
drop table t1;
--echo * TEST of recovery when crash before bulk-insert-with-repair is committed
create table t1 (a varchar(100), key(a)) engine=maria;
create table t2 (a varchar(100)) engine=myisam;
......
@@ -16,6 +16,7 @@
#include "mysys_priv.h"
#include "mysys_err.h"
#include "my_base.h"
#include <m_string.h>
#include <errno.h>
#ifdef HAVE_PREAD
#include <unistd.h>
@@ -47,10 +48,13 @@ size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
{
size_t readbytes;
int error= 0;
#ifndef DBUG_OFF
char llbuf1[22], llbuf2[22];
DBUG_ENTER("my_pread");
DBUG_PRINT("my",("fd: %d Seek: %lu Buffer: 0x%lx Count: %u MyFlags: %d",
Filedes, (ulong) offset, (long) Buffer, (uint) Count,
MyFlags));
DBUG_PRINT("my",("fd: %d Seek: %s Buffer: 0x%lx Count: %s MyFlags: %d",
Filedes, ullstr(offset, llbuf1),
(long) Buffer, ullstr(Count, llbuf2), MyFlags));
#endif
for (;;)
{
#ifndef __WIN__
@@ -127,10 +131,13 @@ size_t my_pwrite(int Filedes, const uchar *Buffer, size_t Count,
{
size_t writenbytes, written;
uint errors;
#ifndef DBUG_OFF
char llbuf1[22], llbuf2[22];
DBUG_ENTER("my_pwrite");
DBUG_PRINT("my",("fd: %d Seek: %lu Buffer: 0x%lx Count: %u MyFlags: %d",
Filedes, (ulong) offset, (long) Buffer, (uint) Count,
MyFlags));
DBUG_PRINT("my",("fd: %d Seek: %s Buffer: 0x%lx Count: %s MyFlags: %d",
Filedes, ullstr(offset, llbuf1),
(long) Buffer, ullstr(Count, llbuf2), MyFlags));
#endif
errors= 0;
written= 0;
......
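To see why the old format string hid the bug, here is a standalone
illustration (plain C, not the mysys code; the offset value is made up): on a
32-bit platform ulong is 32 bits wide, so the old (ulong) cast silently
truncated a bogus ~1.1TB offset to a plausible-looking number, whereas
formatting the full 64-bit value as a decimal string, as ullstr() does into a
22-byte buffer, keeps it visible.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* A bogus ~1.1TB offset of the kind BUG#34062 produced (made-up value) */
  uint64_t offset= 1209462790454ULL;
  /* Old trace: the (ulong) cast is a 32-bit truncation on 32-bit builds,
     so the log showed a harmless-looking small number */
  printf("truncated: %lu\n", (unsigned long) (uint32_t) offset);
  /* New trace: format the full 64-bit value as a decimal string first */
  char llbuf[22];
  snprintf(llbuf, sizeof(llbuf), "%llu", (unsigned long long) offset);
  printf("full: %s\n", llbuf);
  return 0;
}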
@@ -16,6 +16,7 @@
#include "mysys_priv.h"
#include "mysys_err.h"
#include <my_base.h>
#include <m_string.h>
#include <errno.h>
/*
@@ -36,9 +37,12 @@
size_t my_read(File Filedes, uchar *Buffer, size_t Count, myf MyFlags)
{
size_t readbytes, save_count;
#ifndef DBUG_OFF
char llbuf[22];
DBUG_ENTER("my_read");
DBUG_PRINT("my",("fd: %d Buffer: 0x%lx Count: %lu MyFlags: %d",
Filedes, (long) Buffer, (ulong) Count, MyFlags));
DBUG_PRINT("my",("fd: %d Buffer: 0x%lx Count: %s MyFlags: %d",
Filedes, (long) Buffer, ullstr(Count, llbuf), MyFlags));
#endif
save_count= Count;
for (;;)
......
@@ -15,6 +15,7 @@
#include "mysys_priv.h"
#include "mysys_err.h"
#include <m_string.h>
#include <errno.h>
@@ -24,9 +25,12 @@ size_t my_write(int Filedes, const uchar *Buffer, size_t Count, myf MyFlags)
{
size_t writenbytes, written;
uint errors;
#ifndef DBUG_OFF
char llbuf[22];
DBUG_ENTER("my_write");
DBUG_PRINT("my",("fd: %d Buffer: 0x%lx Count: %lu MyFlags: %d",
Filedes, (long) Buffer, (ulong) Count, MyFlags));
DBUG_PRINT("my",("fd: %d Buffer: 0x%lx Count: %s MyFlags: %d",
Filedes, (long) Buffer, ullstr(Count, llbuf), MyFlags));
#endif
errors=0; written=0;
/* The behavior of write(fd, buf, 0) is not portable */
......
@@ -648,7 +648,7 @@ void _ma_check_print_warning(HA_CHECK *param, const char *fmt, ...)
#define BULK_INSERT_SINGLE_UNDO_AND_REPAIR 1
/**
Transactional table doing bulk insert with one single UNDO
(UNDO_BULK_INSERT) and with repair.
(UNDO_BULK_INSERT) and without repair.
*/
#define BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR 2
/**
@@ -1749,15 +1749,16 @@ void ha_maria::start_bulk_insert(ha_rows rows)
{
bulk_insert_single_undo= BULK_INSERT_SINGLE_UNDO_AND_NO_REPAIR;
write_log_record_for_bulk_insert(file);
_ma_tmp_disable_logging_for_table(file, TRUE);
/*
Pages currently in the page cache have type PAGECACHE_LSN_PAGE, we
are not allowed to overwrite them with PAGECACHE_PLAIN_PAGE, so
throw them away. It is not losing data, because we just wrote and
forced an UNDO which will for sure empty the table if we crash.
forced an UNDO which will for sure empty the table if we crash. The
upcoming unique-key insertions however need a proper index, so we
cannot leave the corrupted on-disk index file, thus we truncate it.
*/
_ma_flush_table_files(file, MARIA_FLUSH_DATA|MARIA_FLUSH_INDEX,
FLUSH_IGNORE_CHANGED, FLUSH_IGNORE_CHANGED);
_ma_tmp_disable_logging_for_table(file, TRUE);
maria_delete_all_rows(file);
}
}
else if (!file->bulk_insert &&
@@ -1765,11 +1766,6 @@ void ha_maria::start_bulk_insert(ha_rows rows)
{
maria_init_bulk_insert(file, thd->variables.bulk_insert_buff_size, rows);
}
/**
@todo If we have 0 records here, there is no need to log REDO/UNDO for
each data row, we can just log some special UNDO which will empty the
data file if need to rollback.
*/
}
DBUG_VOID_RETURN;
}
......
@@ -5718,6 +5718,23 @@ my_bool write_hook_for_undo_row_update(enum translog_record_type type
}
my_bool write_hook_for_undo_bulk_insert(enum translog_record_type type
__attribute__ ((unused)),
TRN *trn, MARIA_HA *tbl_info,
LSN *lsn, void *hook_arg)
{
/*
We are going to call maria_delete_all_rows(), but without logging and
syncing, as an optimization (if we crash before commit, the UNDO will
empty the table; if we crash after commit, we have flushed and forced the
files). Status still needs to be reset under the log mutex, in case of a
concurrent checkpoint.
*/
_ma_reset_status(tbl_info);
return write_hook_for_undo(type, trn, tbl_info, lsn, hook_arg);
}
/**
@brief Updates table's lsn_of_file_id.
......
@@ -266,6 +266,9 @@ my_bool write_hook_for_undo_row_delete(enum translog_record_type type,
my_bool write_hook_for_undo_row_update(enum translog_record_type type,
TRN *trn, MARIA_HA *tbl_info,
LSN *lsn, void *hook_arg);
my_bool write_hook_for_undo_bulk_insert(enum translog_record_type type,
TRN *trn, MARIA_HA *tbl_info,
LSN *lsn, void *hook_arg);
my_bool write_hook_for_file_id(enum translog_record_type type,
TRN *trn, MARIA_HA *tbl_info, LSN *lsn,
void *hook_arg);
@@ -164,10 +164,11 @@ int maria_chk_del(HA_CHECK *param, register MARIA_HA *info,
LINT_INIT(old_link);
param->record_checksum=0;
if (share->data_file_type == BLOCK_RECORD)
DBUG_RETURN(0); /* No delete links here */
param->record_checksum=0;
delete_link_length=((share->options & HA_OPTION_PACK_RECORD) ? 20 :
share->rec_reflength+1);
......
@@ -24,6 +24,10 @@
@param info Maria handler
@note It is important that this function does not rely on the state
information, as it may be called by ma_apply_undo_bulk_insert() on an
inconsistent table left by a crash.
@return Operation status
@retval 0 ok
@retval 1 error
......
@@ -616,7 +616,7 @@ static LOG_DESC INIT_LOGREC_INCOMPLETE_GROUP=
static LOG_DESC INIT_LOGREC_UNDO_BULK_INSERT=
{LOGRECTYPE_VARIABLE_LENGTH, 0,
LSN_STORE_SIZE + FILEID_STORE_SIZE,
NULL, write_hook_for_undo, NULL, 1,
NULL, write_hook_for_undo_bulk_insert, NULL, 1,
"undo_bulk_insert", LOGREC_LAST_IN_GROUP, NULL, NULL};
static LOG_DESC INIT_LOGREC_REDO_BITMAP_NEW_PAGE=
......
@@ -322,6 +322,10 @@ my_off_t _ma_new(register MARIA_HA *info, int level,
pos= HA_OFFSET_ERROR;
else
{
/*
Next deleted page's number is in the header of the present page
(singly linked list):
*/
share->current_key_del= mi_sizekorr(buff+share->keypage_header);
DBUG_ASSERT(share->current_key_del != share->state.key_del &&
share->current_key_del);
......
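To make the key_del mechanism above concrete: deleted index pages form a free
list whose "next" links live in the page headers themselves, so consuming the
list through a stale page yields a garbage link. A toy model of that failure
(plain C with hypothetical sizes and offsets, not Maria's structures):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE 32        /* toy page; real blocks are 1kB or more */
#define KEYPAGE_HEADER 0    /* toy offset of the free-list link in a page */

static unsigned char disk[4][PAGE_SIZE];   /* fake on-disk index pages */

int main(void)
{
  uint64_t key_del= 1;   /* head of the free list: page 1 was deleted */
  /* Page 1 was thrown out of the cache before being written, so the disk
     copy still holds stale bytes -- model that as a garbage link: */
  uint64_t stale_link= 1099511627776ULL;   /* ~1.1e12, the "crazy" value */
  memcpy(disk[1] + KEYPAGE_HEADER, &stale_link, sizeof(stale_link));
  /* Allocating a page pops the head and reads the next link out of it: */
  uint64_t next;
  memcpy(&next, disk[(size_t) key_del] + KEYPAGE_HEADER, sizeof(next));
  key_del= next;   /* now garbage: the next allocation seeks to a wild
                      file offset and fails, marking the table corrupted */
  printf("new key_del = %llu\n", (unsigned long long) key_del);
  return 0;
}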
@@ -4255,7 +4255,7 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache,
str->length= 8 + /* number of dirty pages */
(2 + /* table id */
1 + /* data or index file */
4 + /* pageno */
5 + /* pageno */
LSN_STORE_SIZE /* rec_lsn */
) * stored_list_size;
if (NULL == (str->str= my_malloc(str->length, MYF(MY_WME))))
@@ -4283,10 +4283,9 @@ my_bool pagecache_collect_changed_blocks_with_lsn(PAGECACHE *pagecache,
ptr+= 2;
ptr[0]= (share->kfile.file == block->hash_link->file.file);
ptr++;
/* TODO: We should fix the code here to handle 5 byte page numbers */
DBUG_ASSERT(block->hash_link->pageno <= UINT_MAX32);
int4store(ptr, block->hash_link->pageno);
ptr+= 4;
DBUG_ASSERT(block->hash_link->pageno < ((ULL(1)) << 40));
int5store(ptr, block->hash_link->pageno);
ptr+= 5;
lsn_store(ptr, block->rec_lsn);
ptr+= LSN_STORE_SIZE;
if (block->rec_lsn != LSN_MAX)
......
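Each dirty-page entry in the checkpoint record is thus 2 bytes of table id,
1 byte of data/index flag, 5 bytes of page number and LSN_STORE_SIZE bytes of
rec_lsn, as the length computation above shows. A standalone sketch of the
5-byte (de)serialization, equivalent to what the int5store()/uint5korr()
macros do (assuming, as in MySQL's intNstore/uintNkorr family, low-byte-first
on-disk order):

#include <stdio.h>
#include <stdint.h>

static void store5(unsigned char *p, uint64_t v)
{
  /* low byte first, 5 bytes total: enough for pages up to 2^40 - 1 */
  p[0]= (unsigned char) v;
  p[1]= (unsigned char) (v >> 8);
  p[2]= (unsigned char) (v >> 16);
  p[3]= (unsigned char) (v >> 24);
  p[4]= (unsigned char) (v >> 32);
}

static uint64_t korr5(const unsigned char *p)
{
  return (uint64_t) p[0] | ((uint64_t) p[1] << 8) | ((uint64_t) p[2] << 16) |
         ((uint64_t) p[3] << 24) | ((uint64_t) p[4] << 32);
}

int main(void)
{
  unsigned char buf[5];
  uint64_t pageno= 0x89ABCDEF01ULL;      /* needs all 40 bits */
  store5(buf, pageno);
  printf("round-trip ok: %d\n", korr5(buf) == pageno);   /* prints 1 */
  return 0;
}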
@@ -3009,8 +3009,8 @@ static LSN parse_checkpoint_record(LSN lsn)
ptr+= 2;
is_index= ptr[0];
ptr++;
page_id= uint4korr(ptr);
ptr+= 4;
page_id= uint5korr(ptr);
ptr+= 5;
rec_lsn= lsn_korr(ptr);
ptr+= LSN_STORE_SIZE;
if (new_page((is_index << 16) | table_id,
@@ -3056,7 +3056,7 @@ static int new_page(uint32 fileid, pgcache_page_no_t pageid, LSN rec_lsn,
struct st_dirty_page *dirty_page)
{
/* serves as hash key */
dirty_page->file_and_page_id= (((uint64)fileid) << 32) | pageid;
dirty_page->file_and_page_id= (((uint64)fileid) << 40) | pageid;
dirty_page->rec_lsn= rec_lsn;
return my_hash_insert(&all_dirty_pages, (uchar *)dirty_page);
}
......
@@ -117,13 +117,12 @@ my_bool _ma_redo_not_needed_for_page(uint16 shortid, LSN lsn,
{
/*
64-bit key is formed like this:
Most significant byte: 0
Next byte: 0 if data page, 1 if index page
Most significant byte: 0 if data page, 1 if index page
Next 2 bytes: table's short id
Next 4 bytes: page number
Next 5 bytes: page number
*/
uint64 file_and_page_id=
(((uint64)((index << 16) | shortid)) << 32) | page;
(((uint64)((index << 16) | shortid)) << 40) | page;
struct st_dirty_page *dirty_page= (struct st_dirty_page *)
hash_search(&all_dirty_pages,
(uchar *)&file_and_page_id, sizeof(file_and_page_id));
......
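A quick standalone check of the widened hash-key layout described in the
comment above (illustrative values, not taken from the bug):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint64_t index= 1;              /* 1 = index page, 0 = data page */
  uint64_t shortid= 0x0012;       /* table's 2-byte short id */
  uint64_t page= 0x0000001234ULL; /* page number, now 5 bytes (40 bits) */
  /* Same packing as ma_recovery_util.c after the fix: byte 7 holds the
     index flag, bytes 5-6 the short id, bytes 0-4 the page number */
  uint64_t key= (((index << 16) | shortid) << 40) | page;
  printf("key = 0x%016llx\n", (unsigned long long) key); /* 0x0100120000001234 */
  return 0;
}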
@@ -877,7 +877,7 @@ int _ma_split_page(register MARIA_HA *info, register MARIA_KEYDEF *keyinfo,
MARIA_PINNED_PAGE tmp_page_link, *page_link= &tmp_page_link;
MARIA_SHARE *share= info->s;
int res;
DBUG_ENTER("maria_split_page");
DBUG_ENTER("_ma_split_page");
LINT_INIT(after_key);
DBUG_DUMP("buff", split_buff, _ma_get_page_used(share, split_buff));
......