Commit 238db632 authored by marko's avatar marko

branches/zip: Fix a bug in the merge sort in fast index creation.

Some bug still remains, because innodb-index.test will lose some
records from the clustered index after add primary key (a,b(255),c(255))
when row_merge_block_t is reduced to 8192 bytes.

row_merge(): Add the parameter "half".  Add some Valgrind instrumentation.
Note that either stream can end before the other one.

row_merge_sort(): Calculate "half" for row_merge().
parent 4bf90e53
...@@ -1299,27 +1299,26 @@ row_merge( ...@@ -1299,27 +1299,26 @@ row_merge(
dict_index_t* index, /* in: index being created */ dict_index_t* index, /* in: index being created */
merge_file_t* file, /* in/out: file containing merge_file_t* file, /* in/out: file containing
index entries */ index entries */
ulint half, /* in: half the file */
row_merge_block_t* block, /* in/out: 3 buffers */ row_merge_block_t* block, /* in/out: 3 buffers */
int* tmpfd) /* in/out: temporary file int* tmpfd) /* in/out: temporary file
handle */ handle */
{ {
ulint foffs0; /* first input offset */ ulint foffs0; /* first input offset */
ulint foffs1; /* second input offset */ ulint foffs1; /* second input offset */
ulint half; /* upper limit of foffs1 */
ulint error; /* error code */ ulint error; /* error code */
merge_file_t of; /* output file */ merge_file_t of; /* output file */
UNIV_MEM_ASSERT_W(block[0], 3 * sizeof block[0]);
of.fd = *tmpfd; of.fd = *tmpfd;
of.offset = 0; of.offset = 0;
/* Split the input file in two halves. */
half = file->offset / 2;
/* Merge blocks to the output file. */ /* Merge blocks to the output file. */
foffs0 = 0; foffs0 = 0;
foffs1 = half; foffs1 = half;
for (; foffs0 < half; foffs0++, foffs1++) { for (; foffs0 < half && foffs1 < file->offset; foffs0++, foffs1++) {
error = row_merge_blocks(index, file, block, error = row_merge_blocks(index, file, block,
&foffs0, &foffs1, &of); &foffs0, &foffs1, &of);
...@@ -1329,19 +1328,25 @@ row_merge( ...@@ -1329,19 +1328,25 @@ row_merge(
} }
/* Copy the last block, if there is one. */ /* Copy the last block, if there is one. */
while (foffs0 < half) {
if (!row_merge_read(file->fd, foffs0++, block)
|| !row_merge_write(of.fd, of.offset++, block)) {
return(DB_CORRUPTION);
}
}
while (foffs1 < file->offset) { while (foffs1 < file->offset) {
if (!row_merge_read(file->fd, foffs1++, block) if (!row_merge_read(file->fd, foffs1++, block)
|| !row_merge_write(of.fd, of.offset++, block)) { || !row_merge_write(of.fd, of.offset++, block)) {
return(DB_CORRUPTION); return(DB_CORRUPTION);
} }
UNIV_MEM_INVALID(block[0], sizeof block[0]);
} }
/* Swap file descriptors for the next pass. */ /* Swap file descriptors for the next pass. */
*tmpfd = file->fd; *tmpfd = file->fd;
*file = of; *file = of;
UNIV_MEM_INVALID(block[0], 3 * sizeof block[0]);
return(DB_SUCCESS); return(DB_SUCCESS);
} }
...@@ -1362,21 +1367,13 @@ row_merge_sort( ...@@ -1362,21 +1367,13 @@ row_merge_sort(
{ {
ulint blksz; /* block size */ ulint blksz; /* block size */
blksz = 1; for (blksz = 1; blksz < file->offset; blksz *= 2) {
ulint half = ut_2pow_round((file->offset + 1) / 2, blksz);
ulint error = row_merge(index, file, half, block, tmpfd);
for (;; blksz *= 2) {
ulint error = row_merge(index, file, block, tmpfd);
if (error != DB_SUCCESS) { if (error != DB_SUCCESS) {
return(error); return(error);
} }
if (blksz >= file->offset) {
/* everything is in a single block */
break;
}
/* Round up the file size to a multiple of blksz. */
file->offset = ut_2pow_round(file->offset - 1, blksz) + blksz;
} }
return(DB_SUCCESS); return(DB_SUCCESS);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment