Commit c126b01a authored by Bradley C. Kuszmaul's avatar Bradley C. Kuszmaul Committed by Yoni Fogel

The tests may be running. Addresses #1000, #1080, #1131.

git-svn-id: file:///svn/tokudb.1131b+1080a@6156 c7de825b-a66e-492c-adef-691d508d4ae1
parent f35926ad
......@@ -34,6 +34,12 @@ block_allocator_validate (BLOCK_ALLOCATOR ba) {
}
}
/* Debug toggle for the block allocator's consistency checker.
 * Flip "#if 0" to "#if 1" to make every VALIDATE(b) call run
 * block_allocator_validate(b); by default VALIDATE(b) compiles
 * away to a no-op ((void)0), so release builds pay no cost. */
#if 0
#define VALIDATE(b) block_allocator_validate(b)
#else
#define VALIDATE(b) ((void)0)
#endif
#if 0
void
block_allocator_print (BLOCK_ALLOCATOR ba) {
......@@ -42,7 +48,7 @@ block_allocator_print (BLOCK_ALLOCATOR ba) {
printf("%" PRId64 ":%" PRId64 " ", ba->blocks_array[i].offset, ba->blocks_array[i].size);
}
printf("\n");
block_allocator_validate(ba);
VALIDATE(ba);
}
#endif
......@@ -55,6 +61,7 @@ create_block_allocator (BLOCK_ALLOCATOR *ba, u_int64_t reserve_at_beginning) {
XMALLOC_N(result->blocks_array_size, result->blocks_array);
result->next_fit_counter = 0;
*ba = result;
VALIDATE(result);
}
void
......@@ -76,6 +83,7 @@ grow_blocks_array (BLOCK_ALLOCATOR ba) {
void
block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t offset) {
u_int64_t i;
VALIDATE(ba);
assert(offset >= ba->reserve_at_beginning);
grow_blocks_array(ba);
// Just do a linear search for the block
......@@ -84,10 +92,11 @@ block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t of
// allocate it in that slot
// Don't do error checking, since we require that the blocks don't overlap.
// Slide everything over
memmove(ba->blocks_array+i, ba->blocks_array+i-1, (ba->n_blocks - i)*sizeof(struct blockpair));
memmove(ba->blocks_array+i+1, ba->blocks_array+i, (ba->n_blocks - i)*sizeof(struct blockpair));
ba->blocks_array[i].offset = offset;
ba->blocks_array[i].size = size;
ba->n_blocks++;
VALIDATE(ba);
return;
}
}
......@@ -95,6 +104,7 @@ block_allocator_alloc_block_at (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t of
ba->blocks_array[ba->n_blocks].offset = offset;
ba->blocks_array[ba->n_blocks].size = size;
ba->n_blocks++;
VALIDATE(ba);
}
void
......@@ -126,6 +136,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
ba->n_blocks++;
ba->next_fit_counter = blocknum;
*offset = answer_offset;
VALIDATE(ba);
return;
}
// It didn't fit anywhere, so fit it on the end.
......@@ -135,6 +146,7 @@ block_allocator_alloc_block (BLOCK_ALLOCATOR ba, u_int64_t size, u_int64_t *offs
bp->size = size;
ba->n_blocks++;
*offset = answer_offset;
VALIDATE(ba);
}
static int64_t
......@@ -142,6 +154,7 @@ find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
// Find the index in the blocks array that has a particular offset. Requires that the block exist.
// Use binary search so it runs fast.
{
VALIDATE(ba);
if (ba->n_blocks==1) {
assert(ba->blocks_array[0].offset == offset);
return 0;
......@@ -165,10 +178,12 @@ find_block (BLOCK_ALLOCATOR ba, u_int64_t offset)
// Free the allocated block that starts at the given file offset.
// Requires that such a block exists (asserts otherwise); the block is
// removed from the sorted blocks_array by sliding the tail down one slot.
void
block_allocator_free_block (BLOCK_ALLOCATOR ba, u_int64_t offset) {
VALIDATE(ba);
// find_block does a binary search over blocks_array; returns the index
// of the block whose offset matches (presumably -1 / negative on miss —
// TODO(review): confirm find_block's failure contract, only its header is visible here).
int64_t bn = find_block(ba, offset);
assert(bn>=0); // we require that there is a block with that offset. Might as well abort if no such block exists.
// Overlapping copy, so memmove (not memcpy): shift the (n_blocks-bn-1)
// entries after slot bn down by one to close the gap.
memmove(&ba->blocks_array[bn], &ba->blocks_array[bn+1], (ba->n_blocks-bn-1) * sizeof(struct blockpair));
ba->n_blocks--;
VALIDATE(ba);
}
u_int64_t
......
......@@ -179,7 +179,7 @@ struct brt {
/* serialization code */
void toku_serialize_brtnode_to(int fd, BLOCKNUM, BRTNODE node, BRT brt);
int toku_deserialize_brtnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, BRTNODE *brtnode, int tree_node_size);
int toku_deserialize_brtnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, BRTNODE *brtnode, struct brt_header *h);
unsigned int toku_serialize_brtnode_size(BRTNODE node); /* How much space will it take? */
int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len);
......
......@@ -255,10 +255,10 @@ void toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, BRT brt
{
// If the node has never been written, then write the whole buffer, including the zeros
assert(blocknum.b>=0);
printf("%s:%d brt=%p\n", __FILE__, __LINE__, brt);
printf("%s:%d translated_blocknum_limit=%lu blocknum.b=%lu\n", __FILE__, __LINE__, brt->h->translated_blocknum_limit, blocknum.b);
printf("%s:%d allocator=%p\n", __FILE__, __LINE__, brt->h->block_allocator);
printf("%s:%d bt=%p\n", __FILE__, __LINE__, brt->h->block_translation);
//printf("%s:%d brt=%p\n", __FILE__, __LINE__, brt);
//printf("%s:%d translated_blocknum_limit=%lu blocknum.b=%lu\n", __FILE__, __LINE__, brt->h->translated_blocknum_limit, blocknum.b);
//printf("%s:%d allocator=%p\n", __FILE__, __LINE__, brt->h->block_allocator);
//printf("%s:%d bt=%p\n", __FILE__, __LINE__, brt->h->block_translation);
if (brt->h->translated_blocknum_limit <= (u_int64_t)blocknum.b) {
if (brt->h->block_translation == 0) assert(brt->h->translated_blocknum_limit==0);
u_int64_t new_limit = blocknum.b + 1;
......@@ -276,6 +276,7 @@ void toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, BRT brt
brt->h->block_translation[blocknum.b].diskoff = 0;
brt->h->block_translation[blocknum.b].size = 0;
}
brt->h->dirty = 1; // Allocating a block dirties the header.
size_t n_to_write = uncompressed_magic_len + compression_header_len + compressed_len;
u_int64_t offset;
block_allocator_alloc_block(brt->h->block_allocator, n_to_write, &offset);
......@@ -292,8 +293,9 @@ void toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, BRT brt
toku_free(compressed_buf);
}
int toku_deserialize_brtnode_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash, BRTNODE *brtnode, int tree_node_size) {
DISKOFF offset = blocknum.b * tree_node_size;
int toku_deserialize_brtnode_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash, BRTNODE *brtnode, struct brt_header *h) {
assert(0 <= blocknum.b && (u_int64_t)blocknum.b < h->translated_blocknum_limit);
DISKOFF offset = h->block_translation[blocknum.b].diskoff;
TAGMALLOC(BRTNODE, result);
struct rbuf rc;
int i;
......@@ -328,7 +330,7 @@ int toku_deserialize_brtnode_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash
{
ssize_t rlen=pread(fd, compressed_data, compressed_size, offset+uncompressed_magic_len + compression_header_len);
//printf("%s:%d pread->%d datasize=%d\n", __FILE__, __LINE__, r, datasize);
//printf("%s:%d pread->%d offset=%ld datasize=%d\n", __FILE__, __LINE__, r, offset, compressed_size + uncompressed_magic_len + compression_header_len);
assert((size_t)rlen==compressed_size);
//printf("Got %d %d %d %d\n", rc.buf[0], rc.buf[1], rc.buf[2], rc.buf[3]);
}
......@@ -655,11 +657,11 @@ int toku_serialize_brt_header_to (int fd, struct brt_header *h) {
{
struct wbuf w;
u_int64_t size = 4 + h->translated_blocknum_limit * 16; // 4 for the checksum
printf("%s:%d writing translation table of size %ld\n", __FILE__, __LINE__, size);
//printf("%s:%d writing translation table of size %ld at %ld\n", __FILE__, __LINE__, size, h->block_translation_address_on_disk);
wbuf_init(&w, toku_malloc(size), size);
u_int64_t i;
for (i=0; i<h->translated_blocknum_limit; i++) {
printf("%s:%d %ld,%ld\n", __FILE__, __LINE__, h->block_translation[i].diskoff, h->block_translation[i].size);
//printf("%s:%d %ld,%ld\n", __FILE__, __LINE__, h->block_translation[i].diskoff, h->block_translation[i].size);
wbuf_ulonglong(&w, h->block_translation[i].diskoff);
wbuf_ulonglong(&w, h->block_translation[i].size);
}
......@@ -701,7 +703,7 @@ int deserialize_brtheader (u_int32_t size, int fd, DISKOFF off, struct brt_heade
h->block_translation_address_on_disk = rbuf_diskoff(&rc);
// Set up the the block translation buffer.
create_block_allocator(&h->block_allocator, h->nodesize);
printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, h->translated_blocknum_limit, h->block_translation_address_on_disk);
// printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, h->translated_blocknum_limit, h->block_translation_address_on_disk);
if (h->block_translation_address_on_disk == 0) {
h->block_translation = 0;
} else {
......@@ -716,7 +718,7 @@ int deserialize_brtheader (u_int32_t size, int fd, DISKOFF off, struct brt_heade
// check the checksum
u_int32_t x1764 = x1764_memory(tbuf, h->block_translation_size_on_disk - 4);
u_int64_t offset = h->block_translation_size_on_disk - 4;
printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, h->block_translation_address_on_disk, offset, h->block_translation_size_on_disk);
// printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, h->block_translation_address_on_disk, offset, h->block_translation_size_on_disk);
u_int32_t stored_x1764 = ntohl(*(int*)(tbuf + offset));
assert(x1764 == stored_x1764);
}
......@@ -732,6 +734,7 @@ int deserialize_brtheader (u_int32_t size, int fd, DISKOFF off, struct brt_heade
h->block_translation[i].size = rbuf_diskoff(&rt);
if (h->block_translation[i].size > 0)
block_allocator_alloc_block_at(h->block_allocator, h->block_translation[i].size, h->block_translation[i].diskoff);
//printf("%s:%d %ld %ld\n", __FILE__, __LINE__, h->block_translation[i].diskoff, h->block_translation[i].size);
}
toku_free(tbuf);
}
......
......@@ -189,7 +189,7 @@ int toku_brtnode_fetch_callback (CACHEFILE cachefile, BLOCKNUM nodename, u_int32
assert(extraargs);
BRT brt = extraargs;
BRTNODE *result=(BRTNODE*)brtnode_pv;
int r = toku_deserialize_brtnode_from(toku_cachefile_fd(cachefile), nodename, fullhash, result, brt->nodesize);
int r = toku_deserialize_brtnode_from(toku_cachefile_fd(cachefile), nodename, fullhash, result, brt->h);
if (r == 0) {
*sizep = brtnode_memory_size(*result);
*written_lsn = (*result)->disk_lsn;
......@@ -2157,7 +2157,7 @@ static int brt_alloc_init_header(BRT t, const char *dbname, TOKUTXN txn) {
t->h->block_translation = 0;
t->h->block_translation_size_on_disk = 0;
t->h->block_translation_address_on_disk = 0;
printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, t->h->translated_blocknum_limit, t->h->block_translation_address_on_disk);
// printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, t->h->translated_blocknum_limit, t->h->block_translation_address_on_disk);
create_block_allocator(&t->h->block_allocator, t->nodesize);
toku_fifo_create(&t->h->fifo);
t->root_put_counter = global_root_put_counter++;
......@@ -2431,7 +2431,7 @@ int toku_close_brt (BRT brt, TOKULOGGER logger) {
}
assert(0==toku_cachefile_count_pinned(brt->cf, 1)); // For the brt, the pinned count should be zero.
//printf("%s:%d closing cachetable\n", __FILE__, __LINE__);
printf("%s:%d brt=%p ,brt->h=%p\n", __FILE__, __LINE__, brt, brt->h);
// printf("%s:%d brt=%p ,brt->h=%p\n", __FILE__, __LINE__, brt, brt->h);
if ((r = toku_cachefile_close(&brt->cf, logger))!=0) return r;
}
if (brt->database_name) toku_free(brt->database_name);
......
......@@ -82,9 +82,9 @@ int print_le(OMTVALUE lev, u_int32_t UU(idx), void *UU(v)) {
return 0;
}
void dump_node (int f, BLOCKNUM blocknum, int tree_node_size) {
void dump_node (int f, BLOCKNUM blocknum, struct brt_header *h) {
BRTNODE n;
int r = toku_deserialize_brtnode_from (f, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, tree_node_size);
int r = toku_deserialize_brtnode_from (f, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, h);
assert(r==0);
assert(n!=0);
printf("brtnode\n");
......@@ -181,7 +181,7 @@ int main (int argc, const char *argv[]) {
dump_header(f, &h);
BLOCKNUM blocknum;
for (blocknum.b=1; blocknum.b<h->unused_blocks.b; blocknum.b++) {
dump_node(f, blocknum, h->nodesize);
dump_node(f, blocknum, h);
}
toku_brtheader_free(h);
toku_malloc_cleanup();
......
......@@ -71,7 +71,7 @@ static void test_serialize(void) {
toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt); assert(r==0);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, nodesize);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, brt_h);
assert(r==0);
assert(dn->thisnodename.b==20);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment