Commit d2c0e1bc authored by Rich Prohaska's avatar Rich Prohaska Committed by Yoni Fogel

#2966 use assert_zero in some of the newbrt files refs[t:2966]

git-svn-id: file:///svn/toku/tokudb@24717 c7de825b-a66e-492c-adef-691d508d4ae1
parent b09778dd
...@@ -92,6 +92,9 @@ endif ...@@ -92,6 +92,9 @@ endif
try-assert0.tdbrun: try-assert0 try-assert0.tdbrun: try-assert0
./$< 2> /dev/null $(INVERTER) $(SUMMARIZE_SHOULD_FAIL) ./$< 2> /dev/null $(INVERTER) $(SUMMARIZE_SHOULD_FAIL)
try-assert-zero.tdbrun: try-assert-zero
./$< 2> /dev/null $(INVERTER) $(SUMMARIZE_SHOULD_FAIL)
clean: clean:
rm -rf $(TARGETS) *.check.output *.check.valgrind pwrite4g.data testdir dir.*.c rm -rf $(TARGETS) *.check.output *.check.valgrind pwrite4g.data testdir dir.*.c
......
#include <stdio.h>
#include <toku_assert.h>

// Negative test for assert_zero(): feed it a value that is definitely
// nonzero, so the assertion must fire and abort the process.  The test
// harness runs this binary inverted (see INVERTER / SUMMARIZE_SHOULD_FAIL
// in the Makefile rule), i.e. a crash here counts as a pass.
int main(void) {
    int nonzero_value = 42;
    assert_zero(nonzero_value);
    // Unreachable when assert_zero behaves correctly.
    return 0;
}
...@@ -33,7 +33,7 @@ static struct toku_thread_pool *brt_pool = NULL; ...@@ -33,7 +33,7 @@ static struct toku_thread_pool *brt_pool = NULL;
int int
toku_brt_serialize_init(void) { toku_brt_serialize_init(void) {
num_cores = toku_os_get_number_active_processors(); num_cores = toku_os_get_number_active_processors();
int r = toku_thread_pool_create(&brt_pool, num_cores); assert(r == 0); int r = toku_thread_pool_create(&brt_pool, num_cores); lazy_assert_zero(r);
return 0; return 0;
} }
...@@ -49,29 +49,27 @@ static int pwrite_is_locked=0; ...@@ -49,29 +49,27 @@ static int pwrite_is_locked=0;
int int
toku_pwrite_lock_init(void) { toku_pwrite_lock_init(void) {
int r = toku_pthread_mutex_init(&pwrite_mutex, NULL); assert(r == 0); int r = toku_pthread_mutex_init(&pwrite_mutex, NULL); resource_assert_zero(r);
return r; return r;
} }
int int
toku_pwrite_lock_destroy(void) { toku_pwrite_lock_destroy(void) {
int r = toku_pthread_mutex_destroy(&pwrite_mutex); assert(r == 0); int r = toku_pthread_mutex_destroy(&pwrite_mutex); resource_assert_zero(r);
return r; return r;
} }
static inline void static inline void
lock_for_pwrite (void) { lock_for_pwrite (void) {
// Locks the pwrite_mutex. // Locks the pwrite_mutex.
int r = toku_pthread_mutex_lock(&pwrite_mutex); int r = toku_pthread_mutex_lock(&pwrite_mutex); resource_assert_zero(r);
assert(r==0);
pwrite_is_locked = 1; pwrite_is_locked = 1;
} }
static inline void static inline void
unlock_for_pwrite (void) { unlock_for_pwrite (void) {
pwrite_is_locked = 0; pwrite_is_locked = 0;
int r = toku_pthread_mutex_unlock(&pwrite_mutex); int r = toku_pthread_mutex_unlock(&pwrite_mutex); resource_assert_zero(r);
assert(r==0);
} }
...@@ -99,22 +97,22 @@ toku_maybe_truncate_cachefile (CACHEFILE cf, int fd, u_int64_t size_used) ...@@ -99,22 +97,22 @@ toku_maybe_truncate_cachefile (CACHEFILE cf, int fd, u_int64_t size_used)
if (toku_cachefile_is_dev_null_unlocked(cf)) goto done; if (toku_cachefile_is_dev_null_unlocked(cf)) goto done;
{ {
int r = toku_os_get_file_size(fd, &file_size); int r = toku_os_get_file_size(fd, &file_size);
assert(r==0); lazy_assert_zero(r);
assert(file_size >= 0); invariant(file_size >= 0);
} }
// If file space is overallocated by at least 32M // If file space is overallocated by at least 32M
if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) { if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
lock_for_pwrite(); lock_for_pwrite();
{ {
int r = toku_os_get_file_size(fd, &file_size); int r = toku_os_get_file_size(fd, &file_size);
assert(r==0); lazy_assert_zero(r);
assert(file_size >= 0); invariant(file_size >= 0);
} }
if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) { if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
toku_off_t new_size = alignup64(size_used, (2*FILE_CHANGE_INCREMENT)); //Truncate to new size_used. toku_off_t new_size = alignup64(size_used, (2*FILE_CHANGE_INCREMENT)); //Truncate to new size_used.
assert(new_size < file_size); invariant(new_size < file_size);
int r = toku_cachefile_truncate(cf, new_size); int r = toku_cachefile_truncate(cf, new_size);
assert(r==0); lazy_assert_zero(r);
} }
unlock_for_pwrite(); unlock_for_pwrite();
} }
...@@ -140,15 +138,15 @@ maybe_preallocate_in_file (int fd, u_int64_t size) ...@@ -140,15 +138,15 @@ maybe_preallocate_in_file (int fd, u_int64_t size)
int the_errno = errno; int the_errno = errno;
fprintf(stderr, "%s:%d fd=%d size=%"PRIu64"r=%d errno=%d\n", __FUNCTION__, __LINE__, fd, size, r, the_errno); fflush(stderr); fprintf(stderr, "%s:%d fd=%d size=%"PRIu64"r=%d errno=%d\n", __FUNCTION__, __LINE__, fd, size, r, the_errno); fflush(stderr);
} }
assert(r==0); lazy_assert_zero(r);
} }
assert(file_size >= 0); invariant(file_size >= 0);
if ((u_int64_t)file_size < size) { if ((u_int64_t)file_size < size) {
const int N = umin64(size, FILE_CHANGE_INCREMENT); // Double the size of the file, or add 16MiB, whichever is less. const int N = umin64(size, FILE_CHANGE_INCREMENT); // Double the size of the file, or add 16MiB, whichever is less.
char *MALLOC_N(N, wbuf); char *MALLOC_N(N, wbuf);
memset(wbuf, 0, N); memset(wbuf, 0, N);
toku_off_t start_write = alignup64(file_size, 4096); toku_off_t start_write = alignup64(file_size, 4096);
assert(start_write >= file_size); invariant(start_write >= file_size);
toku_os_full_pwrite(fd, wbuf, N, start_write); toku_os_full_pwrite(fd, wbuf, N, start_write);
toku_free(wbuf); toku_free(wbuf);
} }
...@@ -160,10 +158,10 @@ toku_full_pwrite_extend (int fd, const void *buf, size_t count, toku_off_t offse ...@@ -160,10 +158,10 @@ toku_full_pwrite_extend (int fd, const void *buf, size_t count, toku_off_t offse
// requires that the pwrite has been locked // requires that the pwrite has been locked
// On failure, this does not return (an assertion fails or something). // On failure, this does not return (an assertion fails or something).
{ {
assert(pwrite_is_locked); invariant(pwrite_is_locked);
{ {
int r = maybe_preallocate_in_file(fd, offset+count); int r = maybe_preallocate_in_file(fd, offset+count);
assert(r==0); lazy_assert_zero(r);
} }
toku_os_full_pwrite(fd, buf, count, offset); toku_os_full_pwrite(fd, buf, count, offset);
} }
...@@ -208,7 +206,7 @@ toku_serialize_brtnode_size_slow (BRTNODE node) { ...@@ -208,7 +206,7 @@ toku_serialize_brtnode_size_slow (BRTNODE node) {
} }
size += (8+4+4+1+3*8)*(node->u.n.n_children); /* For each child, a child offset, a count for the number of hash table entries, the subtree fingerprint, and 3*8 for the subtree estimates and 1 for the exact bit for the estimates. */ size += (8+4+4+1+3*8)*(node->u.n.n_children); /* For each child, a child offset, a count for the number of hash table entries, the subtree fingerprint, and 3*8 for the subtree estimates and 1 for the exact bit for the estimates. */
int n_buffers = node->u.n.n_children; int n_buffers = node->u.n.n_children;
assert(0 <= n_buffers && n_buffers < TREE_FANOUT+1); invariant(0 <= n_buffers && n_buffers < TREE_FANOUT+1);
for (int i=0; i< n_buffers; i++) { for (int i=0; i< n_buffers; i++) {
FIFO_ITERATE(BNC_BUFFER(node,i), FIFO_ITERATE(BNC_BUFFER(node,i),
key, keylen, key, keylen,
...@@ -217,14 +215,14 @@ toku_serialize_brtnode_size_slow (BRTNODE node) { ...@@ -217,14 +215,14 @@ toku_serialize_brtnode_size_slow (BRTNODE node) {
(hsize+=BRT_CMD_OVERHEAD+KEY_VALUE_OVERHEAD+keylen+datalen+ (hsize+=BRT_CMD_OVERHEAD+KEY_VALUE_OVERHEAD+keylen+datalen+
xids_get_serialize_size(xids))); xids_get_serialize_size(xids)));
} }
assert(hsize==node->u.n.n_bytes_in_buffers); invariant(hsize==node->u.n.n_bytes_in_buffers);
assert(csize==node->u.n.totalchildkeylens); invariant(csize==node->u.n.totalchildkeylens);
size += node->u.n.n_children*stored_sub_block_map_size; size += node->u.n.n_children*stored_sub_block_map_size;
return size+hsize+csize; return size+hsize+csize;
} else { } else {
unsigned int hsize=0; unsigned int hsize=0;
toku_omt_iterate(node->u.l.buffer, addupsize, &hsize); toku_omt_iterate(node->u.l.buffer, addupsize, &hsize);
assert(hsize==node->u.l.n_bytes_in_buffer); invariant(hsize==node->u.l.n_bytes_in_buffer);
hsize += 4; // add n entries in buffer table hsize += 4; // add n entries in buffer table
hsize += 3*8; // add the three leaf stats, but no exact bit hsize += 3*8; // add the three leaf stats, but no exact bit
size += 4 + 1*stored_sub_block_map_size; // one partition size += 4 + 1*stored_sub_block_map_size; // one partition
...@@ -236,12 +234,12 @@ toku_serialize_brtnode_size_slow (BRTNODE node) { ...@@ -236,12 +234,12 @@ toku_serialize_brtnode_size_slow (BRTNODE node) {
unsigned int unsigned int
toku_serialize_brtnode_size (BRTNODE node) { toku_serialize_brtnode_size (BRTNODE node) {
unsigned int result = node_header_overhead + extended_node_header_overhead; unsigned int result = node_header_overhead + extended_node_header_overhead;
assert(sizeof(toku_off_t)==8); invariant(sizeof(toku_off_t)==8);
if (node->height > 0) { if (node->height > 0) {
result += 4; /* subtree fingerpirnt */ result += 4; /* subtree fingerpirnt */
result += 4; /* n_children */ result += 4; /* n_children */
result += 4*(node->u.n.n_children-1); /* key lengths*/ result += 4*(node->u.n.n_children-1); /* key lengths*/
assert(node->u.n.totalchildkeylens < (1<<30)); invariant(node->u.n.totalchildkeylens < (1<<30));
result += node->u.n.totalchildkeylens; /* the lengths of the pivot keys, without their key lengths. */ result += node->u.n.totalchildkeylens; /* the lengths of the pivot keys, without their key lengths. */
result += (8+4+4+1+3*8)*(node->u.n.n_children); /* For each child, a child offset, a count for the number of hash table entries, the subtree fingerprint, and 3*8 for the subtree estimates and one for the exact bit. */ result += (8+4+4+1+3*8)*(node->u.n.n_children); /* For each child, a child offset, a count for the number of hash table entries, the subtree fingerprint, and 3*8 for the subtree estimates and one for the exact bit. */
result += node->u.n.n_bytes_in_buffers; result += node->u.n.n_bytes_in_buffers;
...@@ -255,7 +253,7 @@ toku_serialize_brtnode_size (BRTNODE node) { ...@@ -255,7 +253,7 @@ toku_serialize_brtnode_size (BRTNODE node) {
if (toku_memory_check) { if (toku_memory_check) {
unsigned int slowresult = toku_serialize_brtnode_size_slow(node); unsigned int slowresult = toku_serialize_brtnode_size_slow(node);
if (result!=slowresult) printf("%s:%d result=%u slowresult=%u\n", __FILE__, __LINE__, result, slowresult); if (result!=slowresult) printf("%s:%d result=%u slowresult=%u\n", __FILE__, __LINE__, result, slowresult);
assert(result==slowresult); invariant(result==slowresult);
} }
return result; return result;
} }
...@@ -272,7 +270,7 @@ serialize_node_header(BRTNODE node, struct wbuf *wbuf) { ...@@ -272,7 +270,7 @@ serialize_node_header(BRTNODE node, struct wbuf *wbuf) {
wbuf_nocrc_literal_bytes(wbuf, "tokuleaf", 8); wbuf_nocrc_literal_bytes(wbuf, "tokuleaf", 8);
else else
wbuf_nocrc_literal_bytes(wbuf, "tokunode", 8); wbuf_nocrc_literal_bytes(wbuf, "tokunode", 8);
assert(node->layout_version == BRT_LAYOUT_VERSION); invariant(node->layout_version == BRT_LAYOUT_VERSION);
wbuf_nocrc_int(wbuf, node->layout_version); wbuf_nocrc_int(wbuf, node->layout_version);
wbuf_nocrc_int(wbuf, node->layout_version_original); wbuf_nocrc_int(wbuf, node->layout_version_original);
...@@ -291,7 +289,7 @@ serialize_node_header(BRTNODE node, struct wbuf *wbuf) { ...@@ -291,7 +289,7 @@ serialize_node_header(BRTNODE node, struct wbuf *wbuf) {
static void static void
serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], struct wbuf *wbuf) { serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], struct wbuf *wbuf) {
// serialize the nonleaf header // serialize the nonleaf header
assert(node->u.n.n_children>0); invariant(node->u.n.n_children>0);
// Local fingerprint is not actually stored while in main memory. Must calculate it. // Local fingerprint is not actually stored while in main memory. Must calculate it.
// Subtract the child fingerprints from the subtree fingerprint to get the local fingerprint. // Subtract the child fingerprints from the subtree fingerprint to get the local fingerprint.
{ {
...@@ -325,7 +323,7 @@ serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], ...@@ -325,7 +323,7 @@ serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[],
size_t offset = wbuf_get_woffset(wbuf) - node_header_overhead + node->u.n.n_children * stored_sub_block_map_size; size_t offset = wbuf_get_woffset(wbuf) - node_header_overhead + node->u.n.n_children * stored_sub_block_map_size;
for (int i = 0; i < node->u.n.n_children; i++) { for (int i = 0; i < node->u.n.n_children; i++) {
int idx = get_sub_block_index(n_sub_blocks, sub_block, offset); int idx = get_sub_block_index(n_sub_blocks, sub_block, offset);
assert(idx >= 0); invariant(idx >= 0);
size_t size = sizeof (u_int32_t) + BNC_NBYTESINBUF(node, i); // # elements + size of the elements size_t size = sizeof (u_int32_t) + BNC_NBYTESINBUF(node, i); // # elements + size of the elements
sub_block_map_init(&child_buffer_map[i], idx, offset, size); sub_block_map_init(&child_buffer_map[i], idx, offset, size);
offset += size; offset += size;
...@@ -341,11 +339,11 @@ serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], ...@@ -341,11 +339,11 @@ serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[],
u_int32_t check_local_fingerprint = 0; u_int32_t check_local_fingerprint = 0;
for (int i = 0; i < n_buffers; i++) { for (int i = 0; i < n_buffers; i++) {
//printf("%s:%d p%d=%p n_entries=%d\n", __FILE__, __LINE__, i, node->mdicts[i], mdict_n_entries(node->mdicts[i])); //printf("%s:%d p%d=%p n_entries=%d\n", __FILE__, __LINE__, i, node->mdicts[i], mdict_n_entries(node->mdicts[i]));
// assert(child_buffer_map[i].offset == wbuf_get_woffset(wbuf)); // invariant(child_buffer_map[i].offset == wbuf_get_woffset(wbuf));
wbuf_nocrc_int(wbuf, toku_fifo_n_entries(BNC_BUFFER(node,i))); wbuf_nocrc_int(wbuf, toku_fifo_n_entries(BNC_BUFFER(node,i)));
FIFO_ITERATE(BNC_BUFFER(node,i), key, keylen, data, datalen, type, xids, FIFO_ITERATE(BNC_BUFFER(node,i), key, keylen, data, datalen, type, xids,
{ {
assert(type>=0 && type<256); invariant(type>=0 && type<256);
wbuf_nocrc_char(wbuf, (unsigned char)type); wbuf_nocrc_char(wbuf, (unsigned char)type);
wbuf_nocrc_xids(wbuf, xids); wbuf_nocrc_xids(wbuf, xids);
wbuf_nocrc_bytes(wbuf, key, keylen); wbuf_nocrc_bytes(wbuf, key, keylen);
...@@ -355,7 +353,7 @@ serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], ...@@ -355,7 +353,7 @@ serialize_nonleaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[],
} }
//printf("%s:%d check_local_fingerprint=%8x\n", __FILE__, __LINE__, check_local_fingerprint); //printf("%s:%d check_local_fingerprint=%8x\n", __FILE__, __LINE__, check_local_fingerprint);
if (check_local_fingerprint!=node->local_fingerprint) printf("%s:%d node=%" PRId64 " fingerprint expected=%08x actual=%08x\n", __FILE__, __LINE__, node->thisnodename.b, check_local_fingerprint, node->local_fingerprint); if (check_local_fingerprint!=node->local_fingerprint) printf("%s:%d node=%" PRId64 " fingerprint expected=%08x actual=%08x\n", __FILE__, __LINE__, node->thisnodename.b, check_local_fingerprint, node->local_fingerprint);
assert(check_local_fingerprint==node->local_fingerprint); invariant(check_local_fingerprint==node->local_fingerprint);
} }
} }
...@@ -382,14 +380,14 @@ serialize_leaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], str ...@@ -382,14 +380,14 @@ serialize_leaf(BRTNODE node, int n_sub_blocks, struct sub_block sub_block[], str
for (int i = 0; i < npartitions; i++) { for (int i = 0; i < npartitions; i++) {
size_t offset = wbuf_get_woffset(wbuf) - node_header_overhead; size_t offset = wbuf_get_woffset(wbuf) - node_header_overhead;
int idx = get_sub_block_index(n_sub_blocks, sub_block, offset); int idx = get_sub_block_index(n_sub_blocks, sub_block, offset);
assert(idx >= 0); invariant(idx >= 0);
size_t size = sizeof (u_int32_t) + node->u.l.n_bytes_in_buffer; // # in partition + size of partition size_t size = sizeof (u_int32_t) + node->u.l.n_bytes_in_buffer; // # in partition + size of partition
sub_block_map_init(&part_map[i], idx, offset, size); sub_block_map_init(&part_map[i], idx, offset, size);
} }
// RFP serialize the partition pivots // RFP serialize the partition pivots
for (int i = 0; i < npartitions-1; i++) { for (int i = 0; i < npartitions-1; i++) {
assert(0); lazy_assert(0);
} }
// RFP serialize the partition maps // RFP serialize the partition maps
...@@ -413,8 +411,8 @@ serialize_node(BRTNODE node, char *buf, size_t calculated_size, int n_sub_blocks ...@@ -413,8 +411,8 @@ serialize_node(BRTNODE node, char *buf, size_t calculated_size, int n_sub_blocks
else else
serialize_leaf(node, n_sub_blocks, sub_block, &wb); serialize_leaf(node, n_sub_blocks, sub_block, &wb);
assert(wb.ndone == wb.size); invariant(wb.ndone == wb.size);
assert(calculated_size==wb.ndone); invariant(calculated_size==wb.ndone);
} }
...@@ -477,8 +475,8 @@ toku_serialize_brtnode_to_memory (BRTNODE node, int UU(n_workitems), int UU(n_th ...@@ -477,8 +475,8 @@ toku_serialize_brtnode_to_memory (BRTNODE node, int UU(n_workitems), int UU(n_th
int n_sub_blocks = 0, sub_block_size = 0; int n_sub_blocks = 0, sub_block_size = 0;
size_t data_size = calculated_size - node_header_overhead; size_t data_size = calculated_size - node_header_overhead;
choose_sub_block_size(data_size, max_sub_blocks, &sub_block_size, &n_sub_blocks); choose_sub_block_size(data_size, max_sub_blocks, &sub_block_size, &n_sub_blocks);
assert(0 < n_sub_blocks && n_sub_blocks <= max_sub_blocks); invariant(0 < n_sub_blocks && n_sub_blocks <= max_sub_blocks);
assert(sub_block_size > 0); invariant(sub_block_size > 0);
// set the initial sub block size for all of the sub blocks // set the initial sub block size for all of the sub blocks
struct sub_block sub_block[n_sub_blocks]; struct sub_block sub_block[n_sub_blocks];
...@@ -492,7 +490,7 @@ toku_serialize_brtnode_to_memory (BRTNODE node, int UU(n_workitems), int UU(n_th ...@@ -492,7 +490,7 @@ toku_serialize_brtnode_to_memory (BRTNODE node, int UU(n_workitems), int UU(n_th
result = errno; result = errno;
else { else {
//toku_verify_counts(node); //toku_verify_counts(node);
//assert(size>0); //invariant(size>0);
//printf("%s:%d serializing %lld w height=%d p0=%p\n", __FILE__, __LINE__, off, node->height, node->mdicts[0]); //printf("%s:%d serializing %lld w height=%d p0=%p\n", __FILE__, __LINE__, off, node->height, node->mdicts[0]);
// serialize the node into buf // serialize the node into buf
...@@ -519,7 +517,7 @@ toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct brt_h ...@@ -519,7 +517,7 @@ toku_serialize_brtnode_to (int fd, BLOCKNUM blocknum, BRTNODE node, struct brt_h
//write_now: printf("%s:%d Writing %d bytes\n", __FILE__, __LINE__, w.ndone); //write_now: printf("%s:%d Writing %d bytes\n", __FILE__, __LINE__, w.ndone);
{ {
// If the node has never been written, then write the whole buffer, including the zeros // If the node has never been written, then write the whole buffer, including the zeros
assert(blocknum.b>=0); invariant(blocknum.b>=0);
//printf("%s:%d h=%p\n", __FILE__, __LINE__, h); //printf("%s:%d h=%p\n", __FILE__, __LINE__, h);
//printf("%s:%d translated_blocknum_limit=%lu blocknum.b=%lu\n", __FILE__, __LINE__, h->translated_blocknum_limit, blocknum.b); //printf("%s:%d translated_blocknum_limit=%lu blocknum.b=%lu\n", __FILE__, __LINE__, h->translated_blocknum_limit, blocknum.b);
//printf("%s:%d allocator=%p\n", __FILE__, __LINE__, h->block_allocator); //printf("%s:%d allocator=%p\n", __FILE__, __LINE__, h->block_allocator);
...@@ -577,12 +575,12 @@ deserialize_child_buffer(BRTNODE node, int cnum, struct rbuf *rbuf, u_int32_t *l ...@@ -577,12 +575,12 @@ deserialize_child_buffer(BRTNODE node, int cnum, struct rbuf *rbuf, u_int32_t *l
local_fingerprint += node->rand4fingerprint * toku_calc_fingerprint_cmd(type, xids, key, keylen, val, vallen); local_fingerprint += node->rand4fingerprint * toku_calc_fingerprint_cmd(type, xids, key, keylen, val, vallen);
//printf("Found %s,%s\n", (char*)key, (char*)val); //printf("Found %s,%s\n", (char*)key, (char*)val);
int r = toku_fifo_enq(BNC_BUFFER(node, cnum), key, keylen, val, vallen, type, xids); /* Copies the data into the fifo */ int r = toku_fifo_enq(BNC_BUFFER(node, cnum), key, keylen, val, vallen, type, xids); /* Copies the data into the fifo */
assert(r == 0); lazy_assert_zero(r);
n_bytes_in_buffer += keylen + vallen + KEY_VALUE_OVERHEAD + BRT_CMD_OVERHEAD + xids_get_serialize_size(xids); n_bytes_in_buffer += keylen + vallen + KEY_VALUE_OVERHEAD + BRT_CMD_OVERHEAD + xids_get_serialize_size(xids);
//printf("Inserted\n"); //printf("Inserted\n");
xids_destroy(&xids); xids_destroy(&xids);
} }
assert(rbuf->ndone == rbuf->size); invariant(rbuf->ndone == rbuf->size);
BNC_NBYTESINBUF(node, cnum) = n_bytes_in_buffer; BNC_NBYTESINBUF(node, cnum) = n_bytes_in_buffer;
*local_fingerprint_ret = local_fingerprint; *local_fingerprint_ret = local_fingerprint;
...@@ -662,7 +660,7 @@ deserialize_brtnode_nonleaf_from_rbuf (BRTNODE result, bytevec magic, struct rbu ...@@ -662,7 +660,7 @@ deserialize_brtnode_nonleaf_from_rbuf (BRTNODE result, bytevec magic, struct rbu
MALLOC_N(result->u.n.n_children+1, result->u.n.childinfos); MALLOC_N(result->u.n.n_children+1, result->u.n.childinfos);
MALLOC_N(result->u.n.n_children, result->u.n.childkeys); MALLOC_N(result->u.n.n_children, result->u.n.childkeys);
//printf("n_children=%d\n", result->n_children); //printf("n_children=%d\n", result->n_children);
assert(result->u.n.n_children>=0); invariant(result->u.n.n_children>=0);
for (int i=0; i<result->u.n.n_children; i++) { for (int i=0; i<result->u.n.n_children; i++) {
u_int32_t childfp = rbuf_int(rb); u_int32_t childfp = rbuf_int(rb);
BNC_SUBTREE_FINGERPRINT(result, i)= childfp; BNC_SUBTREE_FINGERPRINT(result, i)= childfp;
...@@ -736,7 +734,7 @@ deserialize_brtnode_leaf_from_rbuf (BRTNODE result, bytevec magic, struct rbuf * ...@@ -736,7 +734,7 @@ deserialize_brtnode_leaf_from_rbuf (BRTNODE result, bytevec magic, struct rbuf *
// deserialize the number of partitions // deserialize the number of partitions
int npartitions = rbuf_int(rb); int npartitions = rbuf_int(rb);
assert(npartitions == 1); invariant(npartitions == 1);
// deserialize partition pivots // deserialize partition pivots
for (int p = 0; p < npartitions-1; p++) { for (int p = 0; p < npartitions-1; p++) {
...@@ -765,7 +763,7 @@ deserialize_brtnode_leaf_from_rbuf (BRTNODE result, bytevec magic, struct rbuf * ...@@ -765,7 +763,7 @@ deserialize_brtnode_leaf_from_rbuf (BRTNODE result, bytevec magic, struct rbuf *
LEAFENTRY le = (LEAFENTRY)(&rb->buf[rb->ndone]); LEAFENTRY le = (LEAFENTRY)(&rb->buf[rb->ndone]);
u_int32_t disksize = leafentry_disksize(le); u_int32_t disksize = leafentry_disksize(le);
rb->ndone += disksize; rb->ndone += disksize;
assert(rb->ndone<=rb->size); invariant(rb->ndone<=rb->size);
array[i]=(OMTVALUE)le; array[i]=(OMTVALUE)le;
actual_sum += x1764_memory(le, disksize); actual_sum += x1764_memory(le, disksize);
...@@ -782,7 +780,7 @@ deserialize_brtnode_leaf_from_rbuf (BRTNODE result, bytevec magic, struct rbuf * ...@@ -782,7 +780,7 @@ deserialize_brtnode_leaf_from_rbuf (BRTNODE result, bytevec magic, struct rbuf *
if (0) { died_1: toku_omt_destroy(&result->u.l.buffer); } if (0) { died_1: toku_omt_destroy(&result->u.l.buffer); }
return r; return r;
} }
assert(array==NULL); lazy_assert(array==NULL);
result->u.l.buffer_mempool.frag_size = start_of_data; result->u.l.buffer_mempool.frag_size = start_of_data;
result->u.l.buffer_mempool.free_offset = end_of_data; result->u.l.buffer_mempool.free_offset = end_of_data;
...@@ -824,7 +822,7 @@ deserialize_brtnode_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, BRTNODE *b ...@@ -824,7 +822,7 @@ deserialize_brtnode_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, BRTNODE *b
bytevec magic; bytevec magic;
rbuf_literal_bytes(rb, &magic, 8); rbuf_literal_bytes(rb, &magic, 8);
result->layout_version = rbuf_int(rb); result->layout_version = rbuf_int(rb);
assert(result->layout_version == BRT_LAYOUT_VERSION); invariant(result->layout_version == BRT_LAYOUT_VERSION);
result->layout_version_original = rbuf_int(rb); result->layout_version_original = rbuf_int(rb);
result->layout_version_read_from_disk = result->layout_version; result->layout_version_read_from_disk = result->layout_version;
result->nodesize = rbuf_int(rb); result->nodesize = rbuf_int(rb);
...@@ -866,14 +864,14 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size, ...@@ -866,14 +864,14 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size,
n_sub_blocks = toku_dtoh32(*(u_int32_t*)(&raw_block[node_header_overhead])); n_sub_blocks = toku_dtoh32(*(u_int32_t*)(&raw_block[node_header_overhead]));
// verify the number of sub blocks // verify the number of sub blocks
assert(0 <= n_sub_blocks && n_sub_blocks <= max_sub_blocks); invariant(0 <= n_sub_blocks && n_sub_blocks <= max_sub_blocks);
{ // verify the header checksum { // verify the header checksum
u_int32_t header_length = node_header_overhead + sub_block_header_size(n_sub_blocks); u_int32_t header_length = node_header_overhead + sub_block_header_size(n_sub_blocks);
assert(header_length <= raw_block_size); invariant(header_length <= raw_block_size);
u_int32_t xsum = x1764_memory(raw_block, header_length); u_int32_t xsum = x1764_memory(raw_block, header_length);
u_int32_t stored_xsum = toku_dtoh32(*(u_int32_t *)(raw_block + header_length)); u_int32_t stored_xsum = toku_dtoh32(*(u_int32_t *)(raw_block + header_length));
assert(xsum == stored_xsum); invariant(xsum == stored_xsum);
} }
int r; int r;
...@@ -904,7 +902,7 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size, ...@@ -904,7 +902,7 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size,
// allocate the uncompressed buffer // allocate the uncompressed buffer
size_t size = node_header_overhead + uncompressed_size; size_t size = node_header_overhead + uncompressed_size;
unsigned char *buf = toku_xmalloc(size); unsigned char *buf = toku_xmalloc(size);
assert(buf); lazy_assert(buf);
rbuf_init(rb, buf, size); rbuf_init(rb, buf, size);
// copy the uncompressed node header to the uncompressed buffer // copy the uncompressed node header to the uncompressed buffer
...@@ -918,7 +916,7 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size, ...@@ -918,7 +916,7 @@ decompress_from_raw_block_into_rbuf(u_int8_t *raw_block, size_t raw_block_size,
// decompress all the compressed sub blocks into the uncompressed buffer // decompress all the compressed sub blocks into the uncompressed buffer
r = decompress_all_sub_blocks(n_sub_blocks, sub_block, compressed_data, uncompressed_data, num_cores, brt_pool); r = decompress_all_sub_blocks(n_sub_blocks, sub_block, compressed_data, uncompressed_data, num_cores, brt_pool);
assert(r == 0); lazy_assert_zero(r);
toku_trace("decompress done"); toku_trace("decompress done");
...@@ -935,7 +933,7 @@ decompress_from_raw_block_into_rbuf_versioned(u_int32_t version, u_int8_t *raw_b ...@@ -935,7 +933,7 @@ decompress_from_raw_block_into_rbuf_versioned(u_int32_t version, u_int8_t *raw_b
r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum); r = decompress_from_raw_block_into_rbuf(raw_block, raw_block_size, rb, blocknum);
break; break;
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
return r; return r;
} }
...@@ -951,19 +949,19 @@ deserialize_brtnode_from_rbuf_versioned (u_int32_t version, BLOCKNUM blocknum, u ...@@ -951,19 +949,19 @@ deserialize_brtnode_from_rbuf_versioned (u_int32_t version, BLOCKNUM blocknum, u
if (!upgrade) if (!upgrade)
r = deserialize_brtnode_from_rbuf(blocknum, fullhash, &brtnode_12, h, rb); r = deserialize_brtnode_from_rbuf(blocknum, fullhash, &brtnode_12, h, rb);
if (r==0) { if (r==0) {
assert(brtnode_12); lazy_assert(brtnode_12);
*brtnode = brtnode_12; *brtnode = brtnode_12;
} }
if (upgrade && r == 0) { if (upgrade && r == 0) {
toku_brtheader_lock(h); toku_brtheader_lock(h);
assert(h->num_blocks_to_upgrade>0); lazy_assert(h->num_blocks_to_upgrade>0);
h->num_blocks_to_upgrade--; h->num_blocks_to_upgrade--;
toku_brtheader_unlock(h); toku_brtheader_unlock(h);
(*brtnode)->dirty = 1; (*brtnode)->dirty = 1;
} }
break; // this is the only break break; // this is the only break
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
return r; return r;
} }
...@@ -986,7 +984,7 @@ read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum, ...@@ -986,7 +984,7 @@ read_and_decompress_block_from_fd_into_rbuf(int fd, BLOCKNUM blocknum,
{ {
// read the (partially compressed) block // read the (partially compressed) block
ssize_t rlen = pread(fd, raw_block, size, offset); ssize_t rlen = pread(fd, raw_block, size, offset);
assert((DISKOFF)rlen == size); lazy_assert((DISKOFF)rlen == size);
} }
// get the layout_version // get the layout_version
int layout_version; int layout_version;
...@@ -1067,7 +1065,7 @@ toku_maybe_upgrade_brt(BRT t) { // possibly do some work to complete the version ...@@ -1067,7 +1065,7 @@ toku_maybe_upgrade_brt(BRT t) { // possibly do some work to complete the version
} }
break; break;
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
} }
if (r) { if (r) {
...@@ -1109,18 +1107,18 @@ void ...@@ -1109,18 +1107,18 @@ void
toku_verify_or_set_counts (BRTNODE node, BOOL set_fingerprints) { toku_verify_or_set_counts (BRTNODE node, BOOL set_fingerprints) {
/*foo*/ /*foo*/
if (node->height==0) { if (node->height==0) {
assert(node->u.l.buffer); lazy_assert(node->u.l.buffer);
struct sum_info sum_info = {0,0,0,0}; struct sum_info sum_info = {0,0,0,0};
toku_omt_iterate(node->u.l.buffer, sum_item, &sum_info); toku_omt_iterate(node->u.l.buffer, sum_item, &sum_info);
assert(sum_info.count==toku_omt_size(node->u.l.buffer)); lazy_assert(sum_info.count==toku_omt_size(node->u.l.buffer));
assert(sum_info.dsum==node->u.l.n_bytes_in_buffer); lazy_assert(sum_info.dsum==node->u.l.n_bytes_in_buffer);
assert(sum_info.msum == node->u.l.buffer_mempool.free_offset - node->u.l.buffer_mempool.frag_size); lazy_assert(sum_info.msum == node->u.l.buffer_mempool.free_offset - node->u.l.buffer_mempool.frag_size);
u_int32_t fps = node->rand4fingerprint * sum_info.fp; u_int32_t fps = node->rand4fingerprint * sum_info.fp;
if (set_fingerprints) { if (set_fingerprints) {
node->local_fingerprint = fps; node->local_fingerprint = fps;
} }
assert(fps==node->local_fingerprint); lazy_assert(fps==node->local_fingerprint);
} else { } else {
unsigned int sum = 0; unsigned int sum = 0;
for (int i=0; i<node->u.n.n_children; i++) for (int i=0; i<node->u.n.n_children; i++)
...@@ -1137,8 +1135,8 @@ toku_verify_or_set_counts (BRTNODE node, BOOL set_fingerprints) { ...@@ -1137,8 +1135,8 @@ toku_verify_or_set_counts (BRTNODE node, BOOL set_fingerprints) {
if (set_fingerprints) { if (set_fingerprints) {
node->local_fingerprint = fp; node->local_fingerprint = fp;
} }
assert(fp==node->local_fingerprint); lazy_assert(fp==node->local_fingerprint);
assert(sum==node->u.n.n_bytes_in_buffers); lazy_assert(sum==node->u.n.n_bytes_in_buffers);
} }
} }
...@@ -1169,16 +1167,16 @@ serialize_brt_header_min_size (u_int32_t version) { ...@@ -1169,16 +1167,16 @@ serialize_brt_header_min_size (u_int32_t version) {
); );
break; break;
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE); lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
return size; return size;
} }
int toku_serialize_brt_header_size (struct brt_header *h) { int toku_serialize_brt_header_size (struct brt_header *h) {
u_int32_t size = serialize_brt_header_min_size(h->layout_version); u_int32_t size = serialize_brt_header_min_size(h->layout_version);
//There is no dynamic data. //There is no dynamic data.
assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE); lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
return size; return size;
} }
...@@ -1203,14 +1201,14 @@ int toku_serialize_brt_header_to_wbuf (struct wbuf *wbuf, struct brt_header *h, ...@@ -1203,14 +1201,14 @@ int toku_serialize_brt_header_to_wbuf (struct wbuf *wbuf, struct brt_header *h,
wbuf_TXNID(wbuf, h->root_xid_that_created); wbuf_TXNID(wbuf, h->root_xid_that_created);
u_int32_t checksum = x1764_finish(&wbuf->checksum); u_int32_t checksum = x1764_finish(&wbuf->checksum);
wbuf_int(wbuf, checksum); wbuf_int(wbuf, checksum);
assert(wbuf->ndone == wbuf->size); lazy_assert(wbuf->ndone == wbuf->size);
return 0; return 0;
} }
int toku_serialize_brt_header_to (int fd, struct brt_header *h) { int toku_serialize_brt_header_to (int fd, struct brt_header *h) {
int rr = 0; int rr = 0;
if (h->panic) return h->panic; if (h->panic) return h->panic;
assert(h->type==BRTHEADER_CHECKPOINT_INPROGRESS); lazy_assert(h->type==BRTHEADER_CHECKPOINT_INPROGRESS);
toku_brtheader_lock(h); toku_brtheader_lock(h);
struct wbuf w_translation; struct wbuf w_translation;
int64_t size_translation; int64_t size_translation;
...@@ -1220,7 +1218,7 @@ int toku_serialize_brt_header_to (int fd, struct brt_header *h) { ...@@ -1220,7 +1218,7 @@ int toku_serialize_brt_header_to (int fd, struct brt_header *h) {
toku_serialize_translation_to_wbuf_unlocked(h->blocktable, &w_translation, toku_serialize_translation_to_wbuf_unlocked(h->blocktable, &w_translation,
&address_translation, &address_translation,
&size_translation); &size_translation);
assert(size_translation==w_translation.size); lazy_assert(size_translation==w_translation.size);
} }
struct wbuf w_main; struct wbuf w_main;
unsigned int size_main = toku_serialize_brt_header_size (h); unsigned int size_main = toku_serialize_brt_header_size (h);
...@@ -1228,9 +1226,9 @@ int toku_serialize_brt_header_to (int fd, struct brt_header *h) { ...@@ -1228,9 +1226,9 @@ int toku_serialize_brt_header_to (int fd, struct brt_header *h) {
wbuf_init(&w_main, toku_malloc(size_main), size_main); wbuf_init(&w_main, toku_malloc(size_main), size_main);
{ {
int r=toku_serialize_brt_header_to_wbuf(&w_main, h, address_translation, size_translation); int r=toku_serialize_brt_header_to_wbuf(&w_main, h, address_translation, size_translation);
assert(r==0); lazy_assert_zero(r);
} }
assert(w_main.ndone==size_main); lazy_assert(w_main.ndone==size_main);
} }
toku_brtheader_unlock(h); toku_brtheader_unlock(h);
lock_for_pwrite(); lock_for_pwrite();
...@@ -1277,7 +1275,7 @@ toku_serialize_descriptor_size(const DESCRIPTOR desc) { ...@@ -1277,7 +1275,7 @@ toku_serialize_descriptor_size(const DESCRIPTOR desc) {
void void
toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc) { toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc) {
if (desc->version==0) assert(desc->dbt.size==0); if (desc->version==0) lazy_assert(desc->dbt.size==0);
wbuf_int(wb, desc->version); wbuf_int(wb, desc->version);
wbuf_bytes(wb, desc->dbt.data, desc->dbt.size); wbuf_bytes(wb, desc->dbt.data, desc->dbt.size);
} }
...@@ -1298,7 +1296,7 @@ toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF ...@@ -1298,7 +1296,7 @@ toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF
u_int32_t checksum = x1764_finish(&w.checksum); u_int32_t checksum = x1764_finish(&w.checksum);
wbuf_int(&w, checksum); wbuf_int(&w, checksum);
} }
assert(w.ndone==w.size); lazy_assert(w.ndone==w.size);
{ {
lock_for_pwrite(); lock_for_pwrite();
//Actual Write translation table //Actual Write translation table
...@@ -1319,15 +1317,15 @@ deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, BOOL temporar ...@@ -1319,15 +1317,15 @@ deserialize_descriptor_from_rbuf(struct rbuf *rb, DESCRIPTOR desc, BOOL temporar
if (size>0) { if (size>0) {
if (!temporary) { if (!temporary) {
data_copy = toku_memdup(data, size); //Cannot keep the reference from rbuf. Must copy. data_copy = toku_memdup(data, size); //Cannot keep the reference from rbuf. Must copy.
assert(data_copy); lazy_assert(data_copy);
} }
} }
else { else {
assert(size==0); lazy_assert(size==0);
data_copy = NULL; data_copy = NULL;
} }
toku_fill_dbt(&desc->dbt, data_copy, size); toku_fill_dbt(&desc->dbt, data_copy, size);
if (desc->version==0) assert(desc->dbt.size==0); if (desc->version==0) lazy_assert(desc->dbt.size==0);
} }
static void static void
...@@ -1337,13 +1335,13 @@ deserialize_descriptor_from(int fd, struct brt_header *h, DESCRIPTOR desc) { ...@@ -1337,13 +1335,13 @@ deserialize_descriptor_from(int fd, struct brt_header *h, DESCRIPTOR desc) {
toku_get_descriptor_offset_size(h->blocktable, &offset, &size); toku_get_descriptor_offset_size(h->blocktable, &offset, &size);
memset(desc, 0, sizeof(*desc)); memset(desc, 0, sizeof(*desc));
if (size > 0) { if (size > 0) {
assert(size>=4); //4 for checksum lazy_assert(size>=4); //4 for checksum
{ {
unsigned char *XMALLOC_N(size, dbuf); unsigned char *XMALLOC_N(size, dbuf);
{ {
lock_for_pwrite(); lock_for_pwrite();
ssize_t r = pread(fd, dbuf, size, offset); ssize_t r = pread(fd, dbuf, size, offset);
assert(r==size); lazy_assert(r==size);
unlock_for_pwrite(); unlock_for_pwrite();
} }
{ {
...@@ -1351,14 +1349,14 @@ deserialize_descriptor_from(int fd, struct brt_header *h, DESCRIPTOR desc) { ...@@ -1351,14 +1349,14 @@ deserialize_descriptor_from(int fd, struct brt_header *h, DESCRIPTOR desc) {
u_int32_t x1764 = x1764_memory(dbuf, size-4); u_int32_t x1764 = x1764_memory(dbuf, size-4);
//printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk); //printf("%s:%d read from %ld (x1764 offset=%ld) size=%ld\n", __FILE__, __LINE__, block_translation_address_on_disk, offset, block_translation_size_on_disk);
u_int32_t stored_x1764 = toku_dtoh32(*(int*)(dbuf + size-4)); u_int32_t stored_x1764 = toku_dtoh32(*(int*)(dbuf + size-4));
assert(x1764 == stored_x1764); lazy_assert(x1764 == stored_x1764);
} }
{ {
struct rbuf rb = {.buf = dbuf, .size = size, .ndone = 0}; struct rbuf rb = {.buf = dbuf, .size = size, .ndone = 0};
//Not temporary; must have a toku_memdup'd copy. //Not temporary; must have a toku_memdup'd copy.
deserialize_descriptor_from_rbuf(&rb, desc, FALSE); deserialize_descriptor_from_rbuf(&rb, desc, FALSE);
} }
assert(toku_serialize_descriptor_size(desc)+4 == size); lazy_assert(toku_serialize_descriptor_size(desc)+4 == size);
toku_free(dbuf); toku_free(dbuf);
} }
} }
...@@ -1381,7 +1379,7 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) { ...@@ -1381,7 +1379,7 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) {
//Check magic number //Check magic number
bytevec magic; bytevec magic;
rbuf_literal_bytes(&rc, &magic, 8); rbuf_literal_bytes(&rc, &magic, 8);
assert(memcmp(magic,"tokudata",8)==0); lazy_assert(memcmp(magic,"tokudata",8)==0);
} }
...@@ -1400,24 +1398,24 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) { ...@@ -1400,24 +1398,24 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) {
//version MUST be in network order on disk regardless of disk order //version MUST be in network order on disk regardless of disk order
h->layout_version = rbuf_network_int(&rc); h->layout_version = rbuf_network_int(&rc);
//TODO: #1924 //TODO: #1924
assert(h->layout_version==BRT_LAYOUT_VERSION); lazy_assert(h->layout_version==BRT_LAYOUT_VERSION);
//Size MUST be in network order regardless of disk order. //Size MUST be in network order regardless of disk order.
u_int32_t size = rbuf_network_int(&rc); u_int32_t size = rbuf_network_int(&rc);
assert(size==rc.size); lazy_assert(size==rc.size);
bytevec tmp_byte_order_check; bytevec tmp_byte_order_check;
rbuf_literal_bytes(&rc, &tmp_byte_order_check, 8); //Must not translate byte order rbuf_literal_bytes(&rc, &tmp_byte_order_check, 8); //Must not translate byte order
int64_t byte_order_stored = *(int64_t*)tmp_byte_order_check; int64_t byte_order_stored = *(int64_t*)tmp_byte_order_check;
assert(byte_order_stored == toku_byte_order_host); lazy_assert(byte_order_stored == toku_byte_order_host);
h->checkpoint_count = rbuf_ulonglong(&rc); h->checkpoint_count = rbuf_ulonglong(&rc);
h->checkpoint_lsn = rbuf_lsn(&rc); h->checkpoint_lsn = rbuf_lsn(&rc);
h->nodesize = rbuf_int(&rc); h->nodesize = rbuf_int(&rc);
DISKOFF translation_address_on_disk = rbuf_diskoff(&rc); DISKOFF translation_address_on_disk = rbuf_diskoff(&rc);
DISKOFF translation_size_on_disk = rbuf_diskoff(&rc); DISKOFF translation_size_on_disk = rbuf_diskoff(&rc);
assert(translation_address_on_disk>0); lazy_assert(translation_address_on_disk>0);
assert(translation_size_on_disk>0); lazy_assert(translation_size_on_disk>0);
// printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, h->translated_blocknum_limit, h->block_translation_address_on_disk); // printf("%s:%d translated_blocknum_limit=%ld, block_translation_address_on_disk=%ld\n", __FILE__, __LINE__, h->translated_blocknum_limit, h->block_translation_address_on_disk);
//Load translation table //Load translation table
...@@ -1427,7 +1425,7 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) { ...@@ -1427,7 +1425,7 @@ deserialize_brtheader (int fd, struct rbuf *rb, struct brt_header **brth) {
{ {
// This cast is messed up in 32-bits if the block translation table is ever more than 4GB. But in that case, the translation table itself won't fit in main memory. // This cast is messed up in 32-bits if the block translation table is ever more than 4GB. But in that case, the translation table itself won't fit in main memory.
ssize_t r = pread(fd, tbuf, translation_size_on_disk, translation_address_on_disk); ssize_t r = pread(fd, tbuf, translation_size_on_disk, translation_address_on_disk);
assert(r==translation_size_on_disk); lazy_assert(r==translation_size_on_disk);
} }
unlock_for_pwrite(); unlock_for_pwrite();
// Create table and read in data. // Create table and read in data.
...@@ -1469,7 +1467,7 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br ...@@ -1469,7 +1467,7 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br
if (!upgrade) if (!upgrade)
rval = deserialize_brtheader (fd, rb, &brth_12); rval = deserialize_brtheader (fd, rb, &brth_12);
if (rval == 0) { if (rval == 0) {
assert(brth_12); lazy_assert(brth_12);
*brth = brth_12; *brth = brth_12;
} }
if (upgrade && rval == 0) { if (upgrade && rval == 0) {
...@@ -1480,10 +1478,10 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br ...@@ -1480,10 +1478,10 @@ deserialize_brtheader_versioned (int fd, struct rbuf *rb, struct brt_header **br
} }
break; // this is the only break break; // this is the only break
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
if (rval == 0) { if (rval == 0) {
assert((*brth)->layout_version == BRT_LAYOUT_VERSION); lazy_assert((*brth)->layout_version == BRT_LAYOUT_VERSION);
(*brth)->layout_version_read_from_disk = version; (*brth)->layout_version_read_from_disk = version;
(*brth)->upgrade_brt_performed = FALSE; (*brth)->upgrade_brt_performed = FALSE;
} }
...@@ -1505,7 +1503,7 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset, struct rbuf * ...@@ -1505,7 +1503,7 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset, struct rbuf *
rb->buf = NULL; rb->buf = NULL;
int64_t n = pread(fd, prefix, prefix_size, offset); int64_t n = pread(fd, prefix, prefix_size, offset);
if (n==0) r = TOKUDB_DICTIONARY_NO_HEADER; if (n==0) r = TOKUDB_DICTIONARY_NO_HEADER;
else if (n<0) {r = errno; assert(r!=0);} else if (n<0) {r = errno; lazy_assert(r!=0);}
else if (n!=prefix_size) r = EINVAL; else if (n!=prefix_size) r = EINVAL;
else { else {
rb->size = prefix_size; rb->size = prefix_size;
...@@ -1543,7 +1541,7 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset, struct rbuf * ...@@ -1543,7 +1541,7 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset, struct rbuf *
rb->buf = NULL; //Prevent freeing of 'prefix' rb->buf = NULL; //Prevent freeing of 'prefix'
} }
if (r==0) { if (r==0) {
assert(rb->ndone==prefix_size); lazy_assert(rb->ndone==prefix_size);
rb->size = size; rb->size = size;
rb->buf = toku_xmalloc(rb->size); rb->buf = toku_xmalloc(rb->size);
} }
...@@ -1551,7 +1549,7 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset, struct rbuf * ...@@ -1551,7 +1549,7 @@ deserialize_brtheader_from_fd_into_rbuf(int fd, toku_off_t offset, struct rbuf *
n = pread(fd, rb->buf, rb->size, offset); n = pread(fd, rb->buf, rb->size, offset);
if (n==-1) { if (n==-1) {
r = errno; r = errno;
assert(r!=0); lazy_assert(r!=0);
} }
else if (n!=(int64_t)rb->size) r = EINVAL; //Header might be useless (wrong size) or could be a disk read error. else if (n!=(int64_t)rb->size) r = EINVAL; //Header might be useless (wrong size) or could be a disk read error.
} }
...@@ -1620,9 +1618,9 @@ toku_deserialize_brtheader_from (int fd, struct brt_header **brth) { ...@@ -1620,9 +1618,9 @@ toku_deserialize_brtheader_from (int fd, struct brt_header **brth) {
version = version_1; version = version_1;
} }
if (r0==0 && r1==0) { if (r0==0 && r1==0) {
assert(checkpoint_count_1 != checkpoint_count_0); lazy_assert(checkpoint_count_1 != checkpoint_count_0);
if (rb == &rb_0) assert(version_0 >= version_1); if (rb == &rb_0) lazy_assert(version_0 >= version_1);
else assert(version_0 <= version_1); else lazy_assert(version_0 <= version_1);
} }
} }
int r = 0; int r = 0;
...@@ -1638,7 +1636,7 @@ toku_deserialize_brtheader_from (int fd, struct brt_header **brth) { ...@@ -1638,7 +1636,7 @@ toku_deserialize_brtheader_from (int fd, struct brt_header **brth) {
r = TOKUDB_DICTIONARY_NO_HEADER; r = TOKUDB_DICTIONARY_NO_HEADER;
} }
else r = r0; //Arbitrarily report the error from the first header. else r = r0; //Arbitrarily report the error from the first header.
assert(r!=0); lazy_assert(r!=0);
} }
if (r==0) r = deserialize_brtheader_versioned(fd, rb, brth, version); if (r==0) r = deserialize_brtheader_versioned(fd, rb, brth, version);
...@@ -1676,7 +1674,7 @@ serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calc ...@@ -1676,7 +1674,7 @@ serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calc
wbuf_init(&wb, buf, calculated_size); wbuf_init(&wb, buf, calculated_size);
{ //Serialize rollback log to local wbuf { //Serialize rollback log to local wbuf
wbuf_nocrc_literal_bytes(&wb, "tokuroll", 8); wbuf_nocrc_literal_bytes(&wb, "tokuroll", 8);
assert(log->layout_version == BRT_LAYOUT_VERSION); lazy_assert(log->layout_version == BRT_LAYOUT_VERSION);
wbuf_nocrc_int(&wb, log->layout_version); wbuf_nocrc_int(&wb, log->layout_version);
wbuf_nocrc_int(&wb, log->layout_version_original); wbuf_nocrc_int(&wb, log->layout_version_original);
wbuf_nocrc_TXNID(&wb, log->txnid); wbuf_nocrc_TXNID(&wb, log->txnid);
...@@ -1694,11 +1692,11 @@ serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calc ...@@ -1694,11 +1692,11 @@ serialize_rollback_log_node_to_buf(ROLLBACK_LOG_NODE log, char *buf, size_t calc
for (item = log->newest_logentry; item; item = item->prev) { for (item = log->newest_logentry; item; item = item->prev) {
toku_logger_rollback_wbuf_nocrc_write(&wb, item); toku_logger_rollback_wbuf_nocrc_write(&wb, item);
} }
assert(done_before + log->rollentry_resident_bytecount == wb.ndone); lazy_assert(done_before + log->rollentry_resident_bytecount == wb.ndone);
} }
} }
assert(wb.ndone == wb.size); lazy_assert(wb.ndone == wb.size);
assert(calculated_size==wb.ndone); lazy_assert(calculated_size==wb.ndone);
} }
static int static int
...@@ -1713,8 +1711,8 @@ toku_serialize_rollback_log_to_memory (ROLLBACK_LOG_NODE log, ...@@ -1713,8 +1711,8 @@ toku_serialize_rollback_log_to_memory (ROLLBACK_LOG_NODE log,
int n_sub_blocks = 0, sub_block_size = 0; int n_sub_blocks = 0, sub_block_size = 0;
size_t data_size = calculated_size - node_header_overhead; size_t data_size = calculated_size - node_header_overhead;
choose_sub_block_size(data_size, max_sub_blocks, &sub_block_size, &n_sub_blocks); choose_sub_block_size(data_size, max_sub_blocks, &sub_block_size, &n_sub_blocks);
assert(0 < n_sub_blocks && n_sub_blocks <= max_sub_blocks); lazy_assert(0 < n_sub_blocks && n_sub_blocks <= max_sub_blocks);
assert(sub_block_size > 0); lazy_assert(sub_block_size > 0);
// set the initial sub block size for all of the sub blocks // set the initial sub block size for all of the sub blocks
struct sub_block sub_block[n_sub_blocks]; struct sub_block sub_block[n_sub_blocks];
...@@ -1746,7 +1744,7 @@ toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log ...@@ -1746,7 +1744,7 @@ toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log
} }
{ {
assert(blocknum.b>=0); lazy_assert(blocknum.b>=0);
DISKOFF offset; DISKOFF offset;
toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset, toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset,
h, for_checkpoint); //dirties h h, for_checkpoint); //dirties h
...@@ -1773,10 +1771,10 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLB ...@@ -1773,10 +1771,10 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLB
//printf("Deserializing %lld datasize=%d\n", off, datasize); //printf("Deserializing %lld datasize=%d\n", off, datasize);
bytevec magic; bytevec magic;
rbuf_literal_bytes(rb, &magic, 8); rbuf_literal_bytes(rb, &magic, 8);
assert(!memcmp(magic, "tokuroll", 8)); lazy_assert(!memcmp(magic, "tokuroll", 8));
result->layout_version = rbuf_int(rb); result->layout_version = rbuf_int(rb);
assert(result->layout_version == BRT_LAYOUT_VERSION); lazy_assert(result->layout_version == BRT_LAYOUT_VERSION);
result->layout_version_original = rbuf_int(rb); result->layout_version_original = rbuf_int(rb);
result->layout_version_read_from_disk = result->layout_version; result->layout_version_read_from_disk = result->layout_version;
result->dirty = FALSE; result->dirty = FALSE;
...@@ -1803,7 +1801,7 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLB ...@@ -1803,7 +1801,7 @@ deserialize_rollback_log_from_rbuf (BLOCKNUM blocknum, u_int32_t fullhash, ROLLB
if (0) { died1: memarena_close(&result->rollentry_arena); goto died0; } if (0) { died1: memarena_close(&result->rollentry_arena); goto died0; }
//Load rollback entries //Load rollback entries
assert(rb->size > 4); lazy_assert(rb->size > 4);
//Start with empty list //Start with empty list
result->oldest_logentry = result->newest_logentry = NULL; result->oldest_logentry = result->newest_logentry = NULL;
while (rb->ndone < rb->size) { while (rb->ndone < rb->size) {
...@@ -1849,13 +1847,13 @@ deserialize_rollback_log_from_rbuf_versioned (u_int32_t version, BLOCKNUM blockn ...@@ -1849,13 +1847,13 @@ deserialize_rollback_log_from_rbuf_versioned (u_int32_t version, BLOCKNUM blockn
if (!upgrade) if (!upgrade)
r = deserialize_rollback_log_from_rbuf(blocknum, fullhash, &rollback_log_node, h, rb); r = deserialize_rollback_log_from_rbuf(blocknum, fullhash, &rollback_log_node, h, rb);
if (r==0) { if (r==0) {
assert(rollback_log_node); lazy_assert(rollback_log_node);
*log = rollback_log_node; *log = rollback_log_node;
} }
if (upgrade && r == 0) (*log)->dirty = 1; if (upgrade && r == 0) (*log)->dirty = 1;
break; // this is the only break break; // this is the only break
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
return r; return r;
} }
......
...@@ -48,7 +48,7 @@ Flush_this_child (node, childnum, BOOL *did_io) { ...@@ -48,7 +48,7 @@ Flush_this_child (node, childnum, BOOL *did_io) {
Flush_some_child (node, BOOL *did_io) { Flush_some_child (node, BOOL *did_io) {
i = pick heaviest child() i = pick heaviest child()
assert(i>0); // there must be such a child lazy_assert(i>0); // there must be such a child
return Flush_this_child (node, i, did_io) return Flush_this_child (node, i, did_io)
} }
...@@ -177,11 +177,11 @@ message are not gorged. (But they may be hungry or too fat or too thin.) ...@@ -177,11 +177,11 @@ message are not gorged. (But they may be hungry or too fat or too thin.)
void void
toku_brt_header_suppress_rollbacks(struct brt_header *h, TOKUTXN txn) { toku_brt_header_suppress_rollbacks(struct brt_header *h, TOKUTXN txn) {
TXNID txnid = toku_txn_get_txnid(txn); TXNID txnid = toku_txn_get_txnid(txn);
assert(h->txnid_that_created_or_locked_when_empty == TXNID_NONE || lazy_assert(h->txnid_that_created_or_locked_when_empty == TXNID_NONE ||
h->txnid_that_created_or_locked_when_empty == txnid); h->txnid_that_created_or_locked_when_empty == txnid);
h->txnid_that_created_or_locked_when_empty = txnid; h->txnid_that_created_or_locked_when_empty = txnid;
TXNID rootid = toku_txn_get_root_txnid(txn); TXNID rootid = toku_txn_get_root_txnid(txn);
assert(h->root_that_created_or_locked_when_empty == TXNID_NONE || lazy_assert(h->root_that_created_or_locked_when_empty == TXNID_NONE ||
h->root_that_created_or_locked_when_empty == rootid); h->root_that_created_or_locked_when_empty == rootid);
h->root_that_created_or_locked_when_empty = rootid; h->root_that_created_or_locked_when_empty = rootid;
} }
...@@ -200,7 +200,7 @@ enum reactivity { RE_STABLE, RE_FUSIBLE, RE_FISSIBLE }; ...@@ -200,7 +200,7 @@ enum reactivity { RE_STABLE, RE_FUSIBLE, RE_FISSIBLE };
static enum reactivity static enum reactivity
get_leaf_reactivity (BRTNODE node) { get_leaf_reactivity (BRTNODE node) {
enum reactivity re = RE_STABLE; enum reactivity re = RE_STABLE;
assert(node->height==0); lazy_assert(node->height==0);
if (node->dirty) { if (node->dirty) {
unsigned int size = toku_serialize_brtnode_size(node); unsigned int size = toku_serialize_brtnode_size(node);
if (size > node->nodesize && toku_omt_size(node->u.l.buffer) > 1) if (size > node->nodesize && toku_omt_size(node->u.l.buffer) > 1)
...@@ -213,7 +213,7 @@ get_leaf_reactivity (BRTNODE node) { ...@@ -213,7 +213,7 @@ get_leaf_reactivity (BRTNODE node) {
static enum reactivity static enum reactivity
get_nonleaf_reactivity (BRTNODE node) { get_nonleaf_reactivity (BRTNODE node) {
assert(node->height>0); lazy_assert(node->height>0);
int n_children = node->u.n.n_children; int n_children = node->u.n.n_children;
if (n_children > TREE_FANOUT) return RE_FISSIBLE; if (n_children > TREE_FANOUT) return RE_FISSIBLE;
if (n_children*4 < TREE_FANOUT) return RE_FUSIBLE; if (n_children*4 < TREE_FANOUT) return RE_FUSIBLE;
...@@ -232,7 +232,7 @@ static int ...@@ -232,7 +232,7 @@ static int
flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re, BOOL *did_io); flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re, BOOL *did_io);
static void brt_verify_flags(BRT brt, BRTNODE node) { static void brt_verify_flags(BRT brt, BRTNODE node) {
assert(brt->flags == node->flags); lazy_assert(brt->flags == node->flags);
} }
int toku_brt_debug_mode = 0; int toku_brt_debug_mode = 0;
...@@ -252,11 +252,11 @@ int toku_brt_debug_mode = 0; ...@@ -252,11 +252,11 @@ int toku_brt_debug_mode = 0;
#endif #endif
static u_int32_t compute_child_fullhash (CACHEFILE cf, BRTNODE node, int childnum) { static u_int32_t compute_child_fullhash (CACHEFILE cf, BRTNODE node, int childnum) {
assert(node->height>0 && childnum<node->u.n.n_children); lazy_assert(node->height>0 && childnum<node->u.n.n_children);
switch (BNC_HAVE_FULLHASH(node, childnum)) { switch (BNC_HAVE_FULLHASH(node, childnum)) {
case TRUE: case TRUE:
{ {
assert(BNC_FULLHASH(node, childnum)==toku_cachetable_hash(cf, BNC_BLOCKNUM(node, childnum))); lazy_assert(BNC_FULLHASH(node, childnum)==toku_cachetable_hash(cf, BNC_BLOCKNUM(node, childnum)));
return BNC_FULLHASH(node, childnum); return BNC_FULLHASH(node, childnum);
} }
case FALSE: case FALSE:
...@@ -302,10 +302,10 @@ brt_leaf_check_leaf_stats (BRTNODE node) ...@@ -302,10 +302,10 @@ brt_leaf_check_leaf_stats (BRTNODE node)
static int count=0; count++; static int count=0; count++;
if (node->height>0) return; if (node->height>0) return;
struct subtree_estimates e = calc_leaf_stats(node); struct subtree_estimates e = calc_leaf_stats(node);
assert(e.ndata == node->u.l.leaf_stats.ndata); lazy_assert(e.ndata == node->u.l.leaf_stats.ndata);
assert(e.nkeys == node->u.l.leaf_stats.nkeys); lazy_assert(e.nkeys == node->u.l.leaf_stats.nkeys);
assert(e.dsize == node->u.l.leaf_stats.dsize); lazy_assert(e.dsize == node->u.l.leaf_stats.dsize);
assert(node->u.l.leaf_stats.exact); lazy_assert(node->u.l.leaf_stats.exact);
} }
// This should be done incrementally in most cases. // This should be done incrementally in most cases.
...@@ -336,7 +336,7 @@ fixup_child_fingerprint (BRTNODE node, int childnum_of_node, BRTNODE child) ...@@ -336,7 +336,7 @@ fixup_child_fingerprint (BRTNODE node, int childnum_of_node, BRTNODE child)
} else { } else {
estimates = child->u.l.leaf_stats; estimates = child->u.l.leaf_stats;
#ifdef SLOWSLOW #ifdef SLOWSLOW
assert(estimates.ndata == child->u.l.leaf_stats.ndata); lazy_assert(estimates.ndata == child->u.l.leaf_stats.ndata);
struct fill_leafnode_estimates_state s = {&estimates, (OMTVALUE)NULL}; struct fill_leafnode_estimates_state s = {&estimates, (OMTVALUE)NULL};
toku_omt_iterate(child->u.l.buffer, fill_leafnode_estimates, &s); toku_omt_iterate(child->u.l.buffer, fill_leafnode_estimates, &s);
#endif #endif
...@@ -362,7 +362,7 @@ verify_local_fingerprint_nonleaf (BRTNODE node) ...@@ -362,7 +362,7 @@ verify_local_fingerprint_nonleaf (BRTNODE node)
fp += toku_calc_fingerprint_cmd(type, xids, key, keylen, data, datalen); fp += toku_calc_fingerprint_cmd(type, xids, key, keylen, data, datalen);
); );
fp *= node->rand4fingerprint; fp *= node->rand4fingerprint;
assert(fp==node->local_fingerprint); lazy_assert(fp==node->local_fingerprint);
} }
} }
...@@ -375,7 +375,7 @@ toku_verify_estimates (BRT t, BRTNODE node) { ...@@ -375,7 +375,7 @@ toku_verify_estimates (BRT t, BRTNODE node) {
u_int32_t fullhash = compute_child_fullhash(t->cf, node, childnum); u_int32_t fullhash = compute_child_fullhash(t->cf, node, childnum);
void *childnode_v; void *childnode_v;
int r = toku_cachetable_get_and_pin(t->cf, childblocknum, fullhash, &childnode_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, t->h); int r = toku_cachetable_get_and_pin(t->cf, childblocknum, fullhash, &childnode_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, t->h);
assert(r==0); lazy_assert_zero(r);
BRTNODE childnode = childnode_v; BRTNODE childnode = childnode_v;
// we'll just do this estimate // we'll just do this estimate
u_int64_t child_estimate = 0; u_int64_t child_estimate = 0;
...@@ -387,7 +387,7 @@ toku_verify_estimates (BRT t, BRTNODE node) { ...@@ -387,7 +387,7 @@ toku_verify_estimates (BRT t, BRTNODE node) {
child_estimate += BNC_SUBTREE_ESTIMATES(childnode, i).ndata; child_estimate += BNC_SUBTREE_ESTIMATES(childnode, i).ndata;
} }
} }
assert(BNC_SUBTREE_ESTIMATES(node, childnum).ndata==child_estimate); lazy_assert(BNC_SUBTREE_ESTIMATES(node, childnum).ndata==child_estimate);
toku_unpin_brtnode(t, childnode); toku_unpin_brtnode(t, childnode);
} }
} }
...@@ -426,7 +426,7 @@ static uint64_t dict_id_serial = 1; ...@@ -426,7 +426,7 @@ static uint64_t dict_id_serial = 1;
static DICTIONARY_ID static DICTIONARY_ID
next_dict_id(void) { next_dict_id(void) {
uint32_t i = toku_sync_fetch_and_increment_uint64(&dict_id_serial); uint32_t i = toku_sync_fetch_and_increment_uint64(&dict_id_serial);
assert(i); // guarantee unique dictionary id by asserting 64-bit counter never wraps lazy_assert(i); // guarantee unique dictionary id by asserting 64-bit counter never wraps
DICTIONARY_ID d = {.dictid = i}; DICTIONARY_ID d = {.dictid = i};
return d; return d;
} }
...@@ -456,7 +456,7 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename ...@@ -456,7 +456,7 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename
printf("\n"); printf("\n");
} }
//if (modified_lsn.lsn > brtnode->lsn.lsn) brtnode->lsn=modified_lsn; //if (modified_lsn.lsn > brtnode->lsn.lsn) brtnode->lsn=modified_lsn;
assert(brtnode->thisnodename.b==nodename.b); lazy_assert(brtnode->thisnodename.b==nodename.b);
//printf("%s:%d %p->mdict[0]=%p\n", __FILE__, __LINE__, brtnode, brtnode->mdicts[0]); //printf("%s:%d %p->mdict[0]=%p\n", __FILE__, __LINE__, brtnode, brtnode->mdicts[0]);
if (write_me) { if (write_me) {
if (!h->panic) { // if the brt panicked, stop writing, otherwise try to write it. if (!h->panic) { // if the brt panicked, stop writing, otherwise try to write it.
...@@ -484,7 +484,7 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename ...@@ -484,7 +484,7 @@ void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename
//fd is protected (must be holding fdlock) //fd is protected (must be holding fdlock)
int toku_brtnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM nodename, u_int32_t fullhash, void **brtnode_pv, long *sizep, void*extraargs) { int toku_brtnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM nodename, u_int32_t fullhash, void **brtnode_pv, long *sizep, void*extraargs) {
assert(extraargs); lazy_assert(extraargs);
struct brt_header *h = extraargs; struct brt_header *h = extraargs;
BRTNODE *result=(BRTNODE*)brtnode_pv; BRTNODE *result=(BRTNODE*)brtnode_pv;
int r = toku_deserialize_brtnode_from(fd, nodename, fullhash, result, h); int r = toku_deserialize_brtnode_from(fd, nodename, fullhash, result, h);
...@@ -531,7 +531,7 @@ verify_in_mempool (OMTVALUE lev, u_int32_t UU(idx), void *vmp) ...@@ -531,7 +531,7 @@ verify_in_mempool (OMTVALUE lev, u_int32_t UU(idx), void *vmp)
{ {
LEAFENTRY le=lev; LEAFENTRY le=lev;
struct mempool *mp=vmp; struct mempool *mp=vmp;
assert(toku_mempool_inrange(mp, le, leafentry_memsize(le))); lazy_assert(toku_mempool_inrange(mp, le, leafentry_memsize(le)));
return 0; return 0;
} }
...@@ -580,13 +580,13 @@ void toku_brtnode_free (BRTNODE *nodep) { ...@@ -580,13 +580,13 @@ void toku_brtnode_free (BRTNODE *nodep) {
static void static void
brtheader_destroy(struct brt_header *h) { brtheader_destroy(struct brt_header *h) {
if (!h->panic) assert(!h->checkpoint_header); if (!h->panic) lazy_assert(!h->checkpoint_header);
//header and checkpoint_header have same Blocktable pointer //header and checkpoint_header have same Blocktable pointer
//cannot destroy since it is still in use by CURRENT //cannot destroy since it is still in use by CURRENT
if (h->type == BRTHEADER_CHECKPOINT_INPROGRESS) h->blocktable = NULL; if (h->type == BRTHEADER_CHECKPOINT_INPROGRESS) h->blocktable = NULL;
else { else {
assert(h->type == BRTHEADER_CURRENT); lazy_assert(h->type == BRTHEADER_CURRENT);
toku_blocktable_destroy(&h->blocktable); toku_blocktable_destroy(&h->blocktable);
if (h->descriptor.dbt.data) toku_free(h->descriptor.dbt.data); if (h->descriptor.dbt.data) toku_free(h->descriptor.dbt.data);
} }
...@@ -596,7 +596,7 @@ static int ...@@ -596,7 +596,7 @@ static int
brtheader_alloc(struct brt_header **hh) { brtheader_alloc(struct brt_header **hh) {
int r = 0; int r = 0;
if ((CALLOC(*hh))==0) { if ((CALLOC(*hh))==0) {
assert(errno==ENOMEM); lazy_assert(errno==ENOMEM);
r = ENOMEM; r = ENOMEM;
} }
return r; return r;
...@@ -605,9 +605,9 @@ brtheader_alloc(struct brt_header **hh) { ...@@ -605,9 +605,9 @@ brtheader_alloc(struct brt_header **hh) {
// Make a copy of the header for the purpose of a checkpoint // Make a copy of the header for the purpose of a checkpoint
static void static void
brtheader_copy_for_checkpoint(struct brt_header *h, LSN checkpoint_lsn) { brtheader_copy_for_checkpoint(struct brt_header *h, LSN checkpoint_lsn) {
assert(h->type == BRTHEADER_CURRENT); lazy_assert(h->type == BRTHEADER_CURRENT);
assert(h->checkpoint_header == NULL); lazy_assert(h->checkpoint_header == NULL);
assert(h->panic==0); lazy_assert(h->panic==0);
struct brt_header* XMALLOC(ch); struct brt_header* XMALLOC(ch);
*ch = *h; //Do a shallow copy *ch = *h; //Do a shallow copy
...@@ -639,7 +639,7 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, size_ ...@@ -639,7 +639,7 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, size_
n->nodesize = t->h->nodesize; n->nodesize = t->h->nodesize;
n->flags = t->flags; n->flags = t->flags;
n->thisnodename = nodename; n->thisnodename = nodename;
assert(t->h->layout_version != 0); lazy_assert(t->h->layout_version != 0);
n->layout_version = t->h->layout_version; n->layout_version = t->h->layout_version;
n->layout_version_original = t->h->layout_version; n->layout_version_original = t->h->layout_version;
n->layout_version_read_from_disk = t->h->layout_version; n->layout_version_read_from_disk = t->h->layout_version;
...@@ -647,7 +647,7 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, size_ ...@@ -647,7 +647,7 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, size_
n->rand4fingerprint = random(); n->rand4fingerprint = random();
n->local_fingerprint = 0; n->local_fingerprint = 0;
n->dirty = 1; n->dirty = 1;
assert(height>=0); lazy_assert(height>=0);
if (height>0) { if (height>0) {
n->u.n.n_children = 0; n->u.n.n_children = 0;
n->u.n.totalchildkeylens = 0; n->u.n.totalchildkeylens = 0;
...@@ -658,17 +658,17 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, size_ ...@@ -658,17 +658,17 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, size_
n->u.l.leaf_stats = zero_estimates; n->u.l.leaf_stats = zero_estimates;
int r; int r;
r = toku_omt_create(&n->u.l.buffer); r = toku_omt_create(&n->u.l.buffer);
assert(r==0); lazy_assert_zero(r);
n->u.l.leaflock_pool = toku_cachefile_leaflock_pool(t->h->cf); n->u.l.leaflock_pool = toku_cachefile_leaflock_pool(t->h->cf);
r = toku_leaflock_borrow(n->u.l.leaflock_pool, &n->u.l.leaflock); r = toku_leaflock_borrow(n->u.l.leaflock_pool, &n->u.l.leaflock);
assert(r==0); lazy_assert_zero(r);
{ {
// mpsize = max(suggest_mpsize, mp_pool_size_for_nodesize) // mpsize = max(suggest_mpsize, mp_pool_size_for_nodesize)
size_t mpsize = mp_pool_size_for_nodesize(n->nodesize); size_t mpsize = mp_pool_size_for_nodesize(n->nodesize);
if (mpsize < suggest_mpsize) if (mpsize < suggest_mpsize)
mpsize = suggest_mpsize; mpsize = suggest_mpsize;
void *mp = toku_malloc(mpsize); void *mp = toku_malloc(mpsize);
assert(mp); lazy_assert(mp);
toku_mempool_init(&n->u.l.buffer_mempool, mp, mpsize); toku_mempool_init(&n->u.l.buffer_mempool, mp, mpsize);
} }
...@@ -693,7 +693,7 @@ brt_init_new_root(BRT brt, BRTNODE nodea, BRTNODE nodeb, DBT splitk, CACHEKEY *r ...@@ -693,7 +693,7 @@ brt_init_new_root(BRT brt, BRTNODE nodea, BRTNODE nodeb, DBT splitk, CACHEKEY *r
int new_height = nodea->height+1; int new_height = nodea->height+1;
BLOCKNUM newroot_diskoff; BLOCKNUM newroot_diskoff;
toku_allocate_blocknum(brt->h->blocktable, &newroot_diskoff, brt->h); toku_allocate_blocknum(brt->h->blocktable, &newroot_diskoff, brt->h);
assert(newroot); lazy_assert(newroot);
newroot->ever_been_written = 0; newroot->ever_been_written = 0;
*rootp=newroot_diskoff; *rootp=newroot_diskoff;
initialize_empty_brtnode (brt, newroot, newroot_diskoff, new_height, 0); initialize_empty_brtnode (brt, newroot, newroot_diskoff, new_height, 0);
...@@ -739,12 +739,12 @@ int toku_create_new_brtnode (BRT t, BRTNODE *result, int height, size_t mpsize) ...@@ -739,12 +739,12 @@ int toku_create_new_brtnode (BRT t, BRTNODE *result, int height, size_t mpsize)
int r; int r;
BLOCKNUM name; BLOCKNUM name;
toku_allocate_blocknum(t->h->blocktable, &name, t->h); toku_allocate_blocknum(t->h->blocktable, &name, t->h);
assert(n); lazy_assert(n);
assert(t->h->nodesize>0); lazy_assert(t->h->nodesize>0);
n->ever_been_written = 0; n->ever_been_written = 0;
initialize_empty_brtnode(t, n, name, height, mpsize); initialize_empty_brtnode(t, n, name, height, mpsize);
*result = n; *result = n;
assert(n->nodesize>0); lazy_assert(n->nodesize>0);
// n->brt = t; // n->brt = t;
//printf("%s:%d putting %p (%lld)\n", __FILE__, __LINE__, n, n->thisnodename); //printf("%s:%d putting %p (%lld)\n", __FILE__, __LINE__, n, n->thisnodename);
u_int32_t fullhash = toku_cachetable_hash(t->cf, n->thisnodename); u_int32_t fullhash = toku_cachetable_hash(t->cf, n->thisnodename);
...@@ -752,7 +752,7 @@ int toku_create_new_brtnode (BRT t, BRTNODE *result, int height, size_t mpsize) ...@@ -752,7 +752,7 @@ int toku_create_new_brtnode (BRT t, BRTNODE *result, int height, size_t mpsize)
r=toku_cachetable_put(t->cf, n->thisnodename, fullhash, r=toku_cachetable_put(t->cf, n->thisnodename, fullhash,
n, brtnode_memory_size(n), n, brtnode_memory_size(n),
toku_brtnode_flush_callback, toku_brtnode_fetch_callback, t->h); toku_brtnode_flush_callback, toku_brtnode_fetch_callback, t->h);
assert(r==0); lazy_assert_zero(r);
return 0; return 0;
} }
...@@ -774,14 +774,14 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -774,14 +774,14 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
//printf("%s:%d splitting leaf %" PRIu64 " which is size %u (targetsize = %u)\n", __FILE__, __LINE__, node->thisnodename.b, toku_serialize_brtnode_size(node), node->nodesize); //printf("%s:%d splitting leaf %" PRIu64 " which is size %u (targetsize = %u)\n", __FILE__, __LINE__, node->thisnodename.b, toku_serialize_brtnode_size(node), node->nodesize);
assert(node->height==0); lazy_assert(node->height==0);
assert(t->h->nodesize>=node->nodesize); /* otherwise we might be in trouble because the nodesize shrank. */ lazy_assert(t->h->nodesize>=node->nodesize); /* otherwise we might be in trouble because the nodesize shrank. */
toku_create_new_brtnode(t, &B, 0, toku_mempool_get_size(&node->u.l.buffer_mempool)); toku_create_new_brtnode(t, &B, 0, toku_mempool_get_size(&node->u.l.buffer_mempool));
assert(B->nodesize>0); lazy_assert(B->nodesize>0);
assert(node->nodesize>0); lazy_assert(node->nodesize>0);
//printf("%s:%d A is at %lld\n", __FILE__, __LINE__, A->thisnodename); //printf("%s:%d A is at %lld\n", __FILE__, __LINE__, A->thisnodename);
//printf("%s:%d B is at %lld nodesize=%d\n", __FILE__, __LINE__, B->thisnodename, B->nodesize); //printf("%s:%d B is at %lld nodesize=%d\n", __FILE__, __LINE__, B->thisnodename, B->nodesize);
assert(node->height>0 || node->u.l.buffer!=0); lazy_assert(node->height>0 || node->u.l.buffer!=0);
toku_verify_all_in_mempool(node); toku_verify_all_in_mempool(node);
...@@ -789,7 +789,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -789,7 +789,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
u_int32_t split_at = 0; u_int32_t split_at = 0;
{ {
OMTVALUE *MALLOC_N(n_leafentries, leafentries); OMTVALUE *MALLOC_N(n_leafentries, leafentries);
assert(leafentries); lazy_assert(leafentries);
toku_omt_iterate(node->u.l.buffer, fill_buf, leafentries); toku_omt_iterate(node->u.l.buffer, fill_buf, leafentries);
split_at = 0; split_at = 0;
{ {
...@@ -805,7 +805,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -805,7 +805,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
// split near the right edge // split near the right edge
sumsofar = 0; sumsofar = 0;
for (i=n_leafentries-1; i>0; i--) { for (i=n_leafentries-1; i>0; i--) {
assert(toku_mempool_inrange(&node->u.l.buffer_mempool, leafentries[i], leafentry_memsize(leafentries[i]))); lazy_assert(toku_mempool_inrange(&node->u.l.buffer_mempool, leafentries[i], leafentry_memsize(leafentries[i])));
sumsofar += leafentry_disksize(leafentries[i]); sumsofar += leafentry_disksize(leafentries[i]);
if (sumlesizes - sumsofar <= node->nodesize) { if (sumlesizes - sumsofar <= node->nodesize) {
split_at = i; split_at = i;
...@@ -819,7 +819,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -819,7 +819,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
// split in half // split in half
sumsofar = 0; sumsofar = 0;
for (i=n_leafentries-1; i>0; i--) { for (i=n_leafentries-1; i>0; i--) {
assert(toku_mempool_inrange(&node->u.l.buffer_mempool, leafentries[i], leafentry_memsize(leafentries[i]))); lazy_assert(toku_mempool_inrange(&node->u.l.buffer_mempool, leafentries[i], leafentry_memsize(leafentries[i])));
sumsofar += leafentry_disksize(leafentries[i]); sumsofar += leafentry_disksize(leafentries[i]);
if (sumsofar >= sumlesizes/2) { if (sumsofar >= sumlesizes/2) {
split_at = i; split_at = i;
...@@ -828,8 +828,8 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -828,8 +828,8 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
} }
} }
//TODO: #1125 REMOVE DEBUG //TODO: #1125 REMOVE DEBUG
assert( sumsofar <= toku_mempool_get_size(&B ->u.l.buffer_mempool)); lazy_assert( sumsofar <= toku_mempool_get_size(&B ->u.l.buffer_mempool));
assert(sumlesizes - sumsofar <= toku_mempool_get_size(&node->u.l.buffer_mempool)); lazy_assert(sumlesizes - sumsofar <= toku_mempool_get_size(&node->u.l.buffer_mempool));
} }
// Now we know where we are going to break it // Now we know where we are going to break it
OMT old_omt = node->u.l.buffer; OMT old_omt = node->u.l.buffer;
...@@ -843,7 +843,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -843,7 +843,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
for (i=split_at; i<n_leafentries; i++) { for (i=split_at; i<n_leafentries; i++) {
LEAFENTRY oldle = leafentries[i]; LEAFENTRY oldle = leafentries[i];
LEAFENTRY newle = toku_mempool_malloc(&B->u.l.buffer_mempool, leafentry_memsize(oldle), 1); LEAFENTRY newle = toku_mempool_malloc(&B->u.l.buffer_mempool, leafentry_memsize(oldle), 1);
assert(newle!=0); // it's a fresh mpool, so this should always work. lazy_assert(newle!=0); // it's a fresh mpool, so this should always work.
diff_est.nkeys++; diff_est.nkeys++;
diff_est.ndata++; diff_est.ndata++;
diff_est.dsize += le_keylen(oldle) + le_latest_vallen(oldle); diff_est.dsize += le_keylen(oldle) + le_latest_vallen(oldle);
...@@ -869,7 +869,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -869,7 +869,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
} }
if ((r = toku_omt_create_from_sorted_array(&B->u.l.buffer, leafentries+split_at, n_leafentries-split_at))) return r; if ((r = toku_omt_create_from_sorted_array(&B->u.l.buffer, leafentries+split_at, n_leafentries-split_at))) return r;
if ((r = toku_omt_create_steal_sorted_array(&node->u.l.buffer, &leafentries, split_at, n_leafentries))) return r; if ((r = toku_omt_create_steal_sorted_array(&node->u.l.buffer, &leafentries, split_at, n_leafentries))) return r;
assert(leafentries==NULL); lazy_assert(leafentries==NULL);
toku_verify_all_in_mempool(node); toku_verify_all_in_mempool(node);
toku_verify_all_in_mempool(B); toku_verify_all_in_mempool(B);
...@@ -886,14 +886,14 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk) ...@@ -886,14 +886,14 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk)
memset(splitk, 0, sizeof *splitk); memset(splitk, 0, sizeof *splitk);
OMTVALUE lev = 0; OMTVALUE lev = 0;
r=toku_omt_fetch(node->u.l.buffer, toku_omt_size(node->u.l.buffer)-1, &lev, NULL); r=toku_omt_fetch(node->u.l.buffer, toku_omt_size(node->u.l.buffer)-1, &lev, NULL);
assert(r==0); // that fetch should have worked. lazy_assert_zero(r); // that fetch should have worked.
LEAFENTRY le=lev; LEAFENTRY le=lev;
splitk->size = le_keylen(le); splitk->size = le_keylen(le);
splitk->data = kv_pair_malloc(le_key(le), le_keylen(le), 0, 0); splitk->data = kv_pair_malloc(le_key(le), le_keylen(le), 0, 0);
splitk->flags=0; splitk->flags=0;
} }
assert(r == 0); lazy_assert(r == 0);
assert(node->height>0 || node->u.l.buffer!=0); lazy_assert(node->height>0 || node->u.l.buffer!=0);
/* Remove it from the cache table, and free its storage. */ /* Remove it from the cache table, and free its storage. */
//printf("%s:%d old pma = %p\n", __FILE__, __LINE__, node->u.l.buffer); //printf("%s:%d old pma = %p\n", __FILE__, __LINE__, node->u.l.buffer);
...@@ -922,9 +922,9 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl ...@@ -922,9 +922,9 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl
int n_children_in_a = old_n_children/2; int n_children_in_a = old_n_children/2;
int n_children_in_b = old_n_children-n_children_in_a; int n_children_in_b = old_n_children-n_children_in_a;
BRTNODE B; BRTNODE B;
assert(node->height>0); lazy_assert(node->height>0);
assert(node->u.n.n_children>=2); // Otherwise, how do we split? We need at least two children to split. */ lazy_assert(node->u.n.n_children>=2); // Otherwise, how do we split? We need at least two children to split. */
assert(t->h->nodesize>=node->nodesize); /* otherwise we might be in trouble because the nodesize shrank. */ lazy_assert(t->h->nodesize>=node->nodesize); /* otherwise we might be in trouble because the nodesize shrank. */
toku_create_new_brtnode(t, &B, node->height, 0); toku_create_new_brtnode(t, &B, node->height, 0);
MALLOC_N(n_children_in_b+1, B->u.n.childinfos); MALLOC_N(n_children_in_b+1, B->u.n.childinfos);
MALLOC_N(n_children_in_b, B->u.n.childkeys); MALLOC_N(n_children_in_b, B->u.n.childkeys);
...@@ -992,7 +992,7 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl ...@@ -992,7 +992,7 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl
// Delete a child, removing it's fingerprint, and also the preceeding pivot key. The child number must be > 0 // Delete a child, removing it's fingerprint, and also the preceeding pivot key. The child number must be > 0
{ {
assert(i>0); lazy_assert(i>0);
if (i>n_children_in_a) { if (i>n_children_in_a) {
B->u.n.childkeys[targchild-1] = node->u.n.childkeys[i-1]; B->u.n.childkeys[targchild-1] = node->u.n.childkeys[i-1];
B->u.n.totalchildkeylens += toku_brt_pivot_key_len(node->u.n.childkeys[i-1]); B->u.n.totalchildkeylens += toku_brt_pivot_key_len(node->u.n.childkeys[i-1]);
...@@ -1009,7 +1009,7 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl ...@@ -1009,7 +1009,7 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl
BNC_SUBTREE_ESTIMATES(B, targchild) = BNC_SUBTREE_ESTIMATES(node, i); BNC_SUBTREE_ESTIMATES(B, targchild) = BNC_SUBTREE_ESTIMATES(node, i);
BNC_SUBTREE_ESTIMATES(node, i) = zero_estimates; BNC_SUBTREE_ESTIMATES(node, i) = zero_estimates;
assert(BNC_NBYTESINBUF(node, i) == 0); lazy_assert(BNC_NBYTESINBUF(node, i) == 0);
} }
// Drop the n_children now (not earlier) so that we can do the fingerprint verification at any time. // Drop the n_children now (not earlier) so that we can do the fingerprint verification at any time.
...@@ -1035,8 +1035,8 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl ...@@ -1035,8 +1035,8 @@ brt_nonleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *spl
*nodea = node; *nodea = node;
*nodeb = B; *nodeb = B;
assert(toku_serialize_brtnode_size(node) <= node->nodesize); lazy_assert(toku_serialize_brtnode_size(node) <= node->nodesize);
assert(toku_serialize_brtnode_size(B) <= B->nodesize); lazy_assert(toku_serialize_brtnode_size(B) <= B->nodesize);
return 0; return 0;
} }
...@@ -1054,11 +1054,11 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum, ...@@ -1054,11 +1054,11 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum,
DBT *splitk /* the data in the childsplitk is alloc'd and is consumed by this call. */ DBT *splitk /* the data in the childsplitk is alloc'd and is consumed by this call. */
) )
{ {
assert(node->height>0); lazy_assert(node->height>0);
assert(0 <= childnum && childnum < node->u.n.n_children); lazy_assert(0 <= childnum && childnum < node->u.n.n_children);
FIFO old_h = BNC_BUFFER(node,childnum); FIFO old_h = BNC_BUFFER(node,childnum);
int old_count = BNC_NBYTESINBUF(node, childnum); int old_count = BNC_NBYTESINBUF(node, childnum);
assert(old_count==0); lazy_assert(old_count==0);
int cnum; int cnum;
int r; int r;
...@@ -1086,7 +1086,7 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum, ...@@ -1086,7 +1086,7 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum,
} }
node->u.n.n_children++; node->u.n.n_children++;
assert(BNC_BLOCKNUM(node, childnum).b==childa->thisnodename.b); // use the same child lazy_assert(BNC_BLOCKNUM(node, childnum).b==childa->thisnodename.b); // use the same child
BNC_BLOCKNUM(node, childnum+1) = childb->thisnodename; BNC_BLOCKNUM(node, childnum+1) = childb->thisnodename;
BNC_HAVE_FULLHASH(node, childnum+1) = TRUE; BNC_HAVE_FULLHASH(node, childnum+1) = TRUE;
BNC_FULLHASH(node, childnum+1) = childb->fullhash; BNC_FULLHASH(node, childnum+1) = childb->fullhash;
...@@ -1095,9 +1095,9 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum, ...@@ -1095,9 +1095,9 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum,
BNC_SUBTREE_ESTIMATES (node, childnum+1)=zero_estimates; BNC_SUBTREE_ESTIMATES (node, childnum+1)=zero_estimates;
fixup_child_fingerprint(node, childnum, childa); fixup_child_fingerprint(node, childnum, childa);
fixup_child_fingerprint(node, childnum+1, childb); fixup_child_fingerprint(node, childnum+1, childb);
r=toku_fifo_create(&BNC_BUFFER(node,childnum+1)); assert(r==0); r=toku_fifo_create(&BNC_BUFFER(node,childnum+1)); lazy_assert_zero(r);
verify_local_fingerprint_nonleaf(node); // The fingerprint hasn't changed and everhything is still there. verify_local_fingerprint_nonleaf(node); // The fingerprint hasn't changed and everhything is still there.
r=toku_fifo_create(&BNC_BUFFER(node,childnum)); assert(r==0); // ??? SHould handle this error case r=toku_fifo_create(&BNC_BUFFER(node,childnum)); lazy_assert_zero(r); // ??? SHould handle this error case
BNC_NBYTESINBUF(node, childnum) = 0; BNC_NBYTESINBUF(node, childnum) = 0;
BNC_NBYTESINBUF(node, childnum+1) = 0; BNC_NBYTESINBUF(node, childnum+1) = 0;
...@@ -1112,7 +1112,7 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum, ...@@ -1112,7 +1112,7 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum,
for (cnum=node->u.n.n_children-2; cnum>childnum; cnum--) { for (cnum=node->u.n.n_children-2; cnum>childnum; cnum--) {
node->u.n.childkeys[cnum] = node->u.n.childkeys[cnum-1]; node->u.n.childkeys[cnum] = node->u.n.childkeys[cnum-1];
} }
//if (logger) assert((t->flags&TOKU_DB_DUPSORT)==0); // the setpivot is wrong for TOKU_DB_DUPSORT, so recovery will be broken. //if (logger) lazy_assert((t->flags&TOKU_DB_DUPSORT)==0); // the setpivot is wrong for TOKU_DB_DUPSORT, so recovery will be broken.
node->u.n.childkeys[childnum]= pivot; node->u.n.childkeys[childnum]= pivot;
node->u.n.totalchildkeylens += toku_brt_pivot_key_len(pivot); node->u.n.totalchildkeylens += toku_brt_pivot_key_len(pivot);
} }
...@@ -1142,9 +1142,9 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum, ...@@ -1142,9 +1142,9 @@ handle_split_of_child (BRT t, BRTNODE node, int childnum,
VERIFY_NODE(t, childb); VERIFY_NODE(t, childb);
r=toku_unpin_brtnode(t, childa); r=toku_unpin_brtnode(t, childa);
assert(r==0); lazy_assert_zero(r);
r=toku_unpin_brtnode(t, childb); r=toku_unpin_brtnode(t, childb);
assert(r==0); lazy_assert_zero(r);
return 0; return 0;
} }
...@@ -1158,7 +1158,7 @@ brt_split_child (BRT t, BRTNODE node, int childnum, BOOL *did_react) ...@@ -1158,7 +1158,7 @@ brt_split_child (BRT t, BRTNODE node, int childnum, BOOL *did_react)
//for (i=0; i<node->u.n.n_children; i++) printf(" %" PRIu64, BNC_SUBTREE_LEAFENTRY_ESTIMATE(node, i)); //for (i=0; i<node->u.n.n_children; i++) printf(" %" PRIu64, BNC_SUBTREE_LEAFENTRY_ESTIMATE(node, i));
printf("\n"); printf("\n");
} }
assert(node->height>0); lazy_assert(node->height>0);
BRTNODE child; BRTNODE child;
if (BNC_NBYTESINBUF(node, childnum)>0) { if (BNC_NBYTESINBUF(node, childnum)>0) {
// I don't think this can happen, but it's easy to handle. Flush the child, and if no longer fissible, then return. // I don't think this can happen, but it's easy to handle. Flush the child, and if no longer fissible, then return.
...@@ -1179,7 +1179,7 @@ brt_split_child (BRT t, BRTNODE node, int childnum, BOOL *did_react) ...@@ -1179,7 +1179,7 @@ brt_split_child (BRT t, BRTNODE node, int childnum, BOOL *did_react)
t->h); t->h);
if (r!=0) return r; if (r!=0) return r;
child = childnode_v; child = childnode_v;
assert(child->thisnodename.b!=0); lazy_assert(child->thisnodename.b!=0);
VERIFY_NODE(t,child); VERIFY_NODE(t,child);
} }
...@@ -1191,11 +1191,11 @@ brt_split_child (BRT t, BRTNODE node, int childnum, BOOL *did_react) ...@@ -1191,11 +1191,11 @@ brt_split_child (BRT t, BRTNODE node, int childnum, BOOL *did_react)
// printf("%s:%d node %" PRIu64 "->u.n.n_children=%d height=%d\n", __FILE__, __LINE__, node->thisnodename.b, node->u.n.n_children, node->height); // printf("%s:%d node %" PRIu64 "->u.n.n_children=%d height=%d\n", __FILE__, __LINE__, node->thisnodename.b, node->u.n.n_children, node->height);
if (child->height==0) { if (child->height==0) {
int r = brtleaf_split(t, child, &nodea, &nodeb, &splitk); int r = brtleaf_split(t, child, &nodea, &nodeb, &splitk);
assert(r==0); // REMOVE LATER lazy_assert_zero(r); // REMOVE LATER
if (r!=0) return r; if (r!=0) return r;
} else { } else {
int r = brt_nonleaf_split(t, child, &nodea, &nodeb, &splitk); int r = brt_nonleaf_split(t, child, &nodea, &nodeb, &splitk);
assert(r==0); // REMOVE LATER lazy_assert_zero(r); // REMOVE LATER
if (r!=0) return r; if (r!=0) return r;
} }
// printf("%s:%d child did split\n", __FILE__, __LINE__); // printf("%s:%d child did split\n", __FILE__, __LINE__);
...@@ -1216,7 +1216,7 @@ static void ...@@ -1216,7 +1216,7 @@ static void
maybe_bump_nkeys (BRTNODE node, int direction) { maybe_bump_nkeys (BRTNODE node, int direction) {
int keybump=direction; int keybump=direction;
node->u.l.leaf_stats.nkeys += keybump;; node->u.l.leaf_stats.nkeys += keybump;;
assert(node->u.l.leaf_stats.exact); lazy_assert(node->u.l.leaf_stats.exact);
} }
static void static void
...@@ -1239,7 +1239,7 @@ brt_leaf_apply_clean_xids_once (BRTNODE node, LEAFENTRY le) ...@@ -1239,7 +1239,7 @@ brt_leaf_apply_clean_xids_once (BRTNODE node, LEAFENTRY le)
size_t olddisksize = oldmemsize; size_t olddisksize = oldmemsize;
#if ULE_DEBUG #if ULE_DEBUG
olddisksize = leafentry_disksize(le); olddisksize = leafentry_disksize(le);
assert(oldmemsize == olddisksize); lazy_assert(oldmemsize == olddisksize);
#endif #endif
u_int32_t old_crc = toku_le_crc(le); u_int32_t old_crc = toku_le_crc(le);
...@@ -1250,13 +1250,13 @@ brt_leaf_apply_clean_xids_once (BRTNODE node, LEAFENTRY le) ...@@ -1250,13 +1250,13 @@ brt_leaf_apply_clean_xids_once (BRTNODE node, LEAFENTRY le)
le_clean_xids(le, &newmemsize, &newdisksize); le_clean_xids(le, &newmemsize, &newdisksize);
#if ULE_DEBUG #if ULE_DEBUG
assert(newmemsize == leafentry_memsize(le)); lazy_assert(newmemsize == leafentry_memsize(le));
assert(newdisksize == leafentry_disksize(le)); lazy_assert(newdisksize == leafentry_disksize(le));
#endif #endif
//le_keylen + le_latest_vallen(le); does not change. No need to update leaf stats //le_keylen + le_latest_vallen(le); does not change. No need to update leaf stats
assert(newmemsize < oldmemsize); lazy_assert(newmemsize < oldmemsize);
size_t size_reclaimed = oldmemsize - newmemsize; size_t size_reclaimed = oldmemsize - newmemsize;
u_int8_t *p = NULL; u_int8_t *p = NULL;
#if ULE_DEBUG #if ULE_DEBUG
...@@ -1298,7 +1298,7 @@ brt_leaf_apply_full_promotion_once (BRTNODE node, LEAFENTRY le) ...@@ -1298,7 +1298,7 @@ brt_leaf_apply_full_promotion_once (BRTNODE node, LEAFENTRY le)
size_t olddisksize = oldmemsize; size_t olddisksize = oldmemsize;
#if ULE_DEBUG #if ULE_DEBUG
olddisksize = leafentry_disksize(le); olddisksize = leafentry_disksize(le);
assert(oldmemsize == olddisksize); lazy_assert(oldmemsize == olddisksize);
#endif #endif
u_int32_t old_crc = toku_le_crc(le); u_int32_t old_crc = toku_le_crc(le);
...@@ -1309,13 +1309,13 @@ brt_leaf_apply_full_promotion_once (BRTNODE node, LEAFENTRY le) ...@@ -1309,13 +1309,13 @@ brt_leaf_apply_full_promotion_once (BRTNODE node, LEAFENTRY le)
le_full_promotion(le, &newmemsize, &newdisksize); le_full_promotion(le, &newmemsize, &newdisksize);
#if ULE_DEBUG #if ULE_DEBUG
assert(newmemsize == leafentry_memsize(le)); lazy_assert(newmemsize == leafentry_memsize(le));
assert(newdisksize == leafentry_disksize(le)); lazy_assert(newdisksize == leafentry_disksize(le));
#endif #endif
//le_keylen + le_latest_vallen(le); does not change. No need to update leaf stats //le_keylen + le_latest_vallen(le); does not change. No need to update leaf stats
assert(newmemsize < oldmemsize); lazy_assert(newmemsize < oldmemsize);
size_t size_reclaimed = oldmemsize - newmemsize; size_t size_reclaimed = oldmemsize - newmemsize;
u_int8_t *p = NULL; u_int8_t *p = NULL;
#if ULE_DEBUG #if ULE_DEBUG
...@@ -1340,7 +1340,7 @@ brt_leaf_apply_full_promotion_once (BRTNODE node, LEAFENTRY le) ...@@ -1340,7 +1340,7 @@ brt_leaf_apply_full_promotion_once (BRTNODE node, LEAFENTRY le)
static void static void
maybe_do_implicit_promotion_on_query (BRT_CURSOR UU(brtcursor), LEAFENTRY UU(le)) { maybe_do_implicit_promotion_on_query (BRT_CURSOR UU(brtcursor), LEAFENTRY UU(le)) {
//Requires: le is not a provdel (Callers never call it unless not provdel). //Requires: le is not a provdel (Callers never call it unless not provdel).
//assert(!le_latest_is_del(le)); //Must be as fast as possible. Assert is superfluous. //lazy_assert(!le_latest_is_del(le)); //Must be as fast as possible. Assert is superfluous.
//Do implicit promotion on query if all of the following apply: //Do implicit promotion on query if all of the following apply:
// * !le_latest_is_del(le) - True by prerequisite. // * !le_latest_is_del(le) - True by prerequisite.
...@@ -1383,10 +1383,10 @@ brt_leaf_delete_leafentry (BRTNODE node, u_int32_t idx, LEAFENTRY le) ...@@ -1383,10 +1383,10 @@ brt_leaf_delete_leafentry (BRTNODE node, u_int32_t idx, LEAFENTRY le)
{ {
u_int32_t oldlen = le_latest_vallen(le) + le_keylen(le); u_int32_t oldlen = le_latest_vallen(le) + le_keylen(le);
assert(node->u.l.leaf_stats.dsize >= oldlen); lazy_assert(node->u.l.leaf_stats.dsize >= oldlen);
node->u.l.leaf_stats.dsize -= oldlen; node->u.l.leaf_stats.dsize -= oldlen;
} }
assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow lazy_assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow
node->u.l.leaf_stats.ndata --; node->u.l.leaf_stats.ndata --;
toku_mempool_mfree(&node->u.l.buffer_mempool, 0, leafentry_memsize(le)); // Must pass 0, since le may be no good any more. toku_mempool_mfree(&node->u.l.buffer_mempool, 0, leafentry_memsize(le)); // Must pass 0, since le may be no good any more.
...@@ -1422,7 +1422,7 @@ brt_leaf_apply_cmd_once (BRTNODE node, BRT_MSG cmd, ...@@ -1422,7 +1422,7 @@ brt_leaf_apply_cmd_once (BRTNODE node, BRT_MSG cmd,
r = apply_msg_to_leafentry(cmd, le, &newlen, &newdisksize, &new_le, node->u.l.buffer, &node->u.l.buffer_mempool, &maybe_free, snapshot_txnids, live_list_reverse); r = apply_msg_to_leafentry(cmd, le, &newlen, &newdisksize, &new_le, node->u.l.buffer, &node->u.l.buffer_mempool, &maybe_free, snapshot_txnids, live_list_reverse);
} }
if (r!=0) return r; if (r!=0) return r;
if (new_le) assert(newdisksize == leafentry_disksize(new_le)); if (new_le) lazy_assert(newdisksize == leafentry_disksize(new_le));
//printf("Applying command: %s xid=%lld ", unparse_cmd_type(cmd->type), (long long)cmd->xid); //printf("Applying command: %s xid=%lld ", unparse_cmd_type(cmd->type), (long long)cmd->xid);
//toku_print_BYTESTRING(stdout, cmd->u.id.key->size, cmd->u.id.key->data); //toku_print_BYTESTRING(stdout, cmd->u.id.key->size, cmd->u.id.key->data);
...@@ -1437,11 +1437,11 @@ brt_leaf_apply_cmd_once (BRTNODE node, BRT_MSG cmd, ...@@ -1437,11 +1437,11 @@ brt_leaf_apply_cmd_once (BRTNODE node, BRT_MSG cmd,
// If we are replacing a leafentry, then the counts on the estimates remain unchanged, but the size might change // If we are replacing a leafentry, then the counts on the estimates remain unchanged, but the size might change
{ {
u_int32_t oldlen = le_keylen(le) + le_latest_vallen(le); u_int32_t oldlen = le_keylen(le) + le_latest_vallen(le);
assert(node->u.l.leaf_stats.dsize >= oldlen); lazy_assert(node->u.l.leaf_stats.dsize >= oldlen);
assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow lazy_assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow
node->u.l.leaf_stats.dsize -= oldlen; node->u.l.leaf_stats.dsize -= oldlen;
node->u.l.leaf_stats.dsize += le_keylen(new_le) + le_latest_vallen(new_le); // add it in two pieces to avoid ugly overflow node->u.l.leaf_stats.dsize += le_keylen(new_le) + le_latest_vallen(new_le); // add it in two pieces to avoid ugly overflow
assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow lazy_assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow
} }
node->u.l.n_bytes_in_buffer -= OMT_ITEM_OVERHEAD + leafentry_disksize(le); node->u.l.n_bytes_in_buffer -= OMT_ITEM_OVERHEAD + leafentry_disksize(le);
...@@ -1475,7 +1475,7 @@ brt_leaf_apply_cmd_once (BRTNODE node, BRT_MSG cmd, ...@@ -1475,7 +1475,7 @@ brt_leaf_apply_cmd_once (BRTNODE node, BRT_MSG cmd,
node->local_fingerprint += node->rand4fingerprint*toku_le_crc(new_le); node->local_fingerprint += node->rand4fingerprint*toku_le_crc(new_le);
node->u.l.leaf_stats.dsize += le_latest_vallen(new_le) + le_keylen(new_le); node->u.l.leaf_stats.dsize += le_latest_vallen(new_le) + le_keylen(new_le);
assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow lazy_assert(node->u.l.leaf_stats.dsize < (1U<<31)); // make sure we didn't underflow
node->u.l.leaf_stats.ndata ++; node->u.l.leaf_stats.ndata ++;
// Look at the key to the left and the one to the right. If both are different then increment nkeys. // Look at the key to the left and the one to the right. If both are different then increment nkeys.
maybe_bump_nkeys(node, +1); maybe_bump_nkeys(node, +1);
...@@ -1503,7 +1503,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1503,7 +1503,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
// toku_pma_verify_fingerprint(node->u.l.buffer, node->rand4fingerprint, node->subtree_fingerprint); // toku_pma_verify_fingerprint(node->u.l.buffer, node->rand4fingerprint, node->subtree_fingerprint);
TOKULOGGER logger = toku_cachefile_logger(t->cf); TOKULOGGER logger = toku_cachefile_logger(t->cf);
VERIFY_NODE(t, node); VERIFY_NODE(t, node);
assert(node->height==0); lazy_assert(node->height==0);
LEAFENTRY storeddata; LEAFENTRY storeddata;
OMTVALUE storeddatav=NULL; OMTVALUE storeddatav=NULL;
...@@ -1589,14 +1589,14 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1589,14 +1589,14 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
//the omt than we started with and the next leafentry will be at the //the omt than we started with and the next leafentry will be at the
//same index as the deleted one. Otherwise, the next leafentry will //same index as the deleted one. Otherwise, the next leafentry will
//be at the next index (+1). //be at the next index (+1).
assert(num_leafentries_before == num_leafentries_after || lazy_assert(num_leafentries_before == num_leafentries_after ||
num_leafentries_before-1 == num_leafentries_after); num_leafentries_before-1 == num_leafentries_after);
if (num_leafentries_after==num_leafentries_before) idx++; //Not deleted, advance index. if (num_leafentries_after==num_leafentries_before) idx++; //Not deleted, advance index.
assert(idx <= num_leafentries_after); lazy_assert(idx <= num_leafentries_after);
if (idx == num_leafentries_after) break; //Reached the end of the leaf if (idx == num_leafentries_after) break; //Reached the end of the leaf
r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL); r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL);
assert(r==0); lazy_assert_zero(r);
} }
storeddata=storeddatav; storeddata=storeddatav;
{ // Continue only if the next record that we found has the same key. { // Continue only if the next record that we found has the same key.
...@@ -1616,7 +1616,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1616,7 +1616,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
omt_size = toku_omt_size(node->u.l.buffer); omt_size = toku_omt_size(node->u.l.buffer);
for (idx = 0; idx < omt_size; ) { for (idx = 0; idx < omt_size; ) {
r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL); r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL);
assert(r==0); lazy_assert_zero(r);
storeddata=storeddatav; storeddata=storeddatav;
int deleted = 0; int deleted = 0;
if (le_num_xids(storeddata)>0) { if (le_num_xids(storeddata)>0) {
...@@ -1643,7 +1643,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1643,7 +1643,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
else else
idx++; idx++;
} }
assert(toku_omt_size(node->u.l.buffer) == omt_size); lazy_assert(toku_omt_size(node->u.l.buffer) == omt_size);
break; break;
case BRT_OPTIMIZE: case BRT_OPTIMIZE:
...@@ -1652,7 +1652,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1652,7 +1652,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
omt_size = toku_omt_size(node->u.l.buffer); omt_size = toku_omt_size(node->u.l.buffer);
for (idx = 0; idx < omt_size; ) { for (idx = 0; idx < omt_size; ) {
r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL); r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL);
assert(r==0); lazy_assert_zero(r);
storeddata=storeddatav; storeddata=storeddatav;
int deleted = 0; int deleted = 0;
if (le_num_xids(storeddata) > 0) { //If already clean, nothing to do. if (le_num_xids(storeddata) > 0) { //If already clean, nothing to do.
...@@ -1660,7 +1660,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1660,7 +1660,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
if (r!=0) return r; if (r!=0) return r;
u_int32_t new_omt_size = toku_omt_size(node->u.l.buffer); u_int32_t new_omt_size = toku_omt_size(node->u.l.buffer);
if (new_omt_size != omt_size) { if (new_omt_size != omt_size) {
assert(new_omt_size+1 == omt_size); lazy_assert(new_omt_size+1 == omt_size);
//Item was deleted. //Item was deleted.
deleted = 1; deleted = 1;
} }
...@@ -1671,7 +1671,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1671,7 +1671,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
else else
idx++; idx++;
} }
assert(toku_omt_size(node->u.l.buffer) == omt_size); lazy_assert(toku_omt_size(node->u.l.buffer) == omt_size);
break; break;
case BRT_COMMIT_BROADCAST_TXN: case BRT_COMMIT_BROADCAST_TXN:
...@@ -1681,7 +1681,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1681,7 +1681,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
omt_size = toku_omt_size(node->u.l.buffer); omt_size = toku_omt_size(node->u.l.buffer);
for (idx = 0; idx < omt_size; ) { for (idx = 0; idx < omt_size; ) {
r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL); r = toku_omt_fetch(node->u.l.buffer, idx, &storeddatav, NULL);
assert(r==0); lazy_assert_zero(r);
storeddata=storeddatav; storeddata=storeddatav;
int deleted = 0; int deleted = 0;
if (le_has_xids(storeddata, cmd->xids)) { if (le_has_xids(storeddata, cmd->xids)) {
...@@ -1689,7 +1689,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1689,7 +1689,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
if (r!=0) return r; if (r!=0) return r;
u_int32_t new_omt_size = toku_omt_size(node->u.l.buffer); u_int32_t new_omt_size = toku_omt_size(node->u.l.buffer);
if (new_omt_size != omt_size) { if (new_omt_size != omt_size) {
assert(new_omt_size+1 == omt_size); lazy_assert(new_omt_size+1 == omt_size);
//Item was deleted. //Item was deleted.
deleted = 1; deleted = 1;
} }
...@@ -1700,7 +1700,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd, ...@@ -1700,7 +1700,7 @@ brt_leaf_put_cmd (BRT t, BRTNODE node, BRT_MSG cmd,
else else
idx++; idx++;
} }
assert(toku_omt_size(node->u.l.buffer) == omt_size); lazy_assert(toku_omt_size(node->u.l.buffer) == omt_size);
break; break;
...@@ -1745,7 +1745,7 @@ static int brt_nonleaf_cmd_once_to_child (BRT t, BRTNODE node, unsigned int chil ...@@ -1745,7 +1745,7 @@ static int brt_nonleaf_cmd_once_to_child (BRT t, BRTNODE node, unsigned int chil
verify_local_fingerprint_nonleaf(child); verify_local_fingerprint_nonleaf(child);
int rr = toku_unpin_brtnode(t, child); int rr = toku_unpin_brtnode(t, child);
assert(rr==0); lazy_assert_zero(rr);
verify_local_fingerprint_nonleaf(node); verify_local_fingerprint_nonleaf(node);
...@@ -1763,7 +1763,7 @@ static int brt_nonleaf_cmd_once_to_child (BRT t, BRTNODE node, unsigned int chil ...@@ -1763,7 +1763,7 @@ static int brt_nonleaf_cmd_once_to_child (BRT t, BRTNODE node, unsigned int chil
node->local_fingerprint += node->rand4fingerprint * toku_calc_fingerprint_cmd(type, cmd->xids, k->data, k->size, v->data, v->size); node->local_fingerprint += node->rand4fingerprint * toku_calc_fingerprint_cmd(type, cmd->xids, k->data, k->size, v->data, v->size);
int diff = k->size + v->size + KEY_VALUE_OVERHEAD + BRT_CMD_OVERHEAD + xids_get_serialize_size(cmd->xids); int diff = k->size + v->size + KEY_VALUE_OVERHEAD + BRT_CMD_OVERHEAD + xids_get_serialize_size(cmd->xids);
int r=toku_fifo_enq(BNC_BUFFER(node,childnum), k->data, k->size, v->data, v->size, type, cmd->xids); int r=toku_fifo_enq(BNC_BUFFER(node,childnum), k->data, k->size, v->data, v->size, type, cmd->xids);
assert(r==0); lazy_assert_zero(r);
node->u.n.n_bytes_in_buffers += diff; node->u.n.n_bytes_in_buffers += diff;
BNC_NBYTESINBUF(node, childnum) += diff; BNC_NBYTESINBUF(node, childnum) += diff;
node->dirty = 1; node->dirty = 1;
...@@ -1776,7 +1776,7 @@ static int brt_nonleaf_cmd_once_to_child (BRT t, BRTNODE node, unsigned int chil ...@@ -1776,7 +1776,7 @@ static int brt_nonleaf_cmd_once_to_child (BRT t, BRTNODE node, unsigned int chil
/* find the leftmost child that may contain the key */ /* find the leftmost child that may contain the key */
unsigned int toku_brtnode_which_child (BRTNODE node , DBT *k, BRT t) { unsigned int toku_brtnode_which_child (BRTNODE node , DBT *k, BRT t) {
assert(node->height>0); lazy_assert(node->height>0);
#define DO_PIVOT_SEARCH_LR 0 #define DO_PIVOT_SEARCH_LR 0
#if DO_PIVOT_SEARCH_LR #if DO_PIVOT_SEARCH_LR
int i; int i;
...@@ -1904,7 +1904,7 @@ static LEAFENTRY ...@@ -1904,7 +1904,7 @@ static LEAFENTRY
fetch_from_buf (OMT omt, u_int32_t idx) { fetch_from_buf (OMT omt, u_int32_t idx) {
OMTVALUE v = 0; OMTVALUE v = 0;
int r = toku_omt_fetch(omt, idx, &v, NULL); int r = toku_omt_fetch(omt, idx, &v, NULL);
assert(r==0); lazy_assert_zero(r);
return (LEAFENTRY)v; return (LEAFENTRY)v;
} }
...@@ -1918,11 +1918,11 @@ merge_leaf_nodes (BRTNODE a, BRTNODE b) { ...@@ -1918,11 +1918,11 @@ merge_leaf_nodes (BRTNODE a, BRTNODE b) {
u_int32_t le_crc = toku_le_crc(le); u_int32_t le_crc = toku_le_crc(le);
{ {
LEAFENTRY new_le = mempool_malloc_from_omt(omta, &a->u.l.buffer_mempool, le_size, 0); LEAFENTRY new_le = mempool_malloc_from_omt(omta, &a->u.l.buffer_mempool, le_size, 0);
assert(new_le); lazy_assert(new_le);
memcpy(new_le, le, le_size); memcpy(new_le, le, le_size);
int idx = toku_omt_size(a->u.l.buffer); int idx = toku_omt_size(a->u.l.buffer);
int r = toku_omt_insert_at(omta, new_le, idx); int r = toku_omt_insert_at(omta, new_le, idx);
assert(r==0); lazy_assert_zero(r);
a->u.l.n_bytes_in_buffer += OMT_ITEM_OVERHEAD + le_size; //This should be disksize a->u.l.n_bytes_in_buffer += OMT_ITEM_OVERHEAD + le_size; //This should be disksize
a->local_fingerprint += a->rand4fingerprint * le_crc; a->local_fingerprint += a->rand4fingerprint * le_crc;
...@@ -1934,16 +1934,16 @@ merge_leaf_nodes (BRTNODE a, BRTNODE b) { ...@@ -1934,16 +1934,16 @@ merge_leaf_nodes (BRTNODE a, BRTNODE b) {
{ {
maybe_bump_nkeys(b, -1); maybe_bump_nkeys(b, -1);
int r = toku_omt_delete_at(omtb, 0); int r = toku_omt_delete_at(omtb, 0);
assert(r==0); lazy_assert_zero(r);
b->u.l.n_bytes_in_buffer -= OMT_ITEM_OVERHEAD + le_size; b->u.l.n_bytes_in_buffer -= OMT_ITEM_OVERHEAD + le_size;
b->local_fingerprint -= b->rand4fingerprint * le_crc; b->local_fingerprint -= b->rand4fingerprint * le_crc;
b->u.l.leaf_stats.ndata--; b->u.l.leaf_stats.ndata--;
b->u.l.leaf_stats.dsize-= le_keylen(le) + le_latest_vallen(le); b->u.l.leaf_stats.dsize-= le_keylen(le) + le_latest_vallen(le);
//printf("%s:%d Subed %u got %lu\n", __FILE__, __LINE__, le_keylen(le)+le_latest_vallen(le), b->u.l.leaf_stats.dsize); //printf("%s:%d Subed %u got %lu\n", __FILE__, __LINE__, le_keylen(le)+le_latest_vallen(le), b->u.l.leaf_stats.dsize);
assert(b->u.l.leaf_stats.ndata < 1U<<31); lazy_assert(b->u.l.leaf_stats.ndata < 1U<<31);
assert(b->u.l.leaf_stats.nkeys < 1U<<31); lazy_assert(b->u.l.leaf_stats.nkeys < 1U<<31);
assert(b->u.l.leaf_stats.dsize < 1U<<31); lazy_assert(b->u.l.leaf_stats.dsize < 1U<<31);
toku_mempool_mfree(&b->u.l.buffer_mempool, 0, le_size); toku_mempool_mfree(&b->u.l.buffer_mempool, 0, le_size);
} }
...@@ -1964,7 +1964,7 @@ balance_leaf_nodes (BRTNODE a, BRTNODE b, struct kv_pair **splitk) ...@@ -1964,7 +1964,7 @@ balance_leaf_nodes (BRTNODE a, BRTNODE b, struct kv_pair **splitk)
BRTNODE to = move_from_right ? a : b; BRTNODE to = move_from_right ? a : b;
OMT omtfrom = from->u.l.buffer; OMT omtfrom = from->u.l.buffer;
OMT omtto = to ->u.l.buffer; OMT omtto = to ->u.l.buffer;
assert(toku_serialize_brtnode_size(to) <= toku_serialize_brtnode_size(from)); // Could be equal in some screwy cases. lazy_assert(toku_serialize_brtnode_size(to) <= toku_serialize_brtnode_size(from)); // Could be equal in some screwy cases.
while (toku_serialize_brtnode_size(to) < toku_serialize_brtnode_size(from) while (toku_serialize_brtnode_size(to) < toku_serialize_brtnode_size(from)
&& &&
toku_omt_size(omtfrom)>1 // don't keep rebalancing if there's only one key in the from. toku_omt_size(omtfrom)>1 // don't keep rebalancing if there's only one key in the from.
...@@ -1976,10 +1976,10 @@ balance_leaf_nodes (BRTNODE a, BRTNODE b, struct kv_pair **splitk) ...@@ -1976,10 +1976,10 @@ balance_leaf_nodes (BRTNODE a, BRTNODE b, struct kv_pair **splitk)
u_int32_t le_crc = toku_le_crc(le); u_int32_t le_crc = toku_le_crc(le);
{ {
LEAFENTRY new_le = mempool_malloc_from_omt(omtto, &to->u.l.buffer_mempool, le_size, 0); LEAFENTRY new_le = mempool_malloc_from_omt(omtto, &to->u.l.buffer_mempool, le_size, 0);
assert(new_le); lazy_assert(new_le);
memcpy(new_le, le, le_size); memcpy(new_le, le, le_size);
int r = toku_omt_insert_at(omtto, new_le, to_idx); int r = toku_omt_insert_at(omtto, new_le, to_idx);
assert(r==0); lazy_assert_zero(r);
maybe_bump_nkeys(to, +1); maybe_bump_nkeys(to, +1);
to ->u.l.n_bytes_in_buffer += OMT_ITEM_OVERHEAD + le_size; to ->u.l.n_bytes_in_buffer += OMT_ITEM_OVERHEAD + le_size;
to ->local_fingerprint += to->rand4fingerprint * le_crc; to ->local_fingerprint += to->rand4fingerprint * le_crc;
...@@ -1991,21 +1991,21 @@ balance_leaf_nodes (BRTNODE a, BRTNODE b, struct kv_pair **splitk) ...@@ -1991,21 +1991,21 @@ balance_leaf_nodes (BRTNODE a, BRTNODE b, struct kv_pair **splitk)
{ {
maybe_bump_nkeys(from, -1); maybe_bump_nkeys(from, -1);
int r = toku_omt_delete_at(omtfrom, from_idx); int r = toku_omt_delete_at(omtfrom, from_idx);
assert(r==0); lazy_assert_zero(r);
from->u.l.n_bytes_in_buffer -= OMT_ITEM_OVERHEAD + le_size; from->u.l.n_bytes_in_buffer -= OMT_ITEM_OVERHEAD + le_size;
from->local_fingerprint -= from->rand4fingerprint * le_crc; from->local_fingerprint -= from->rand4fingerprint * le_crc;
from->u.l.leaf_stats.ndata--; from->u.l.leaf_stats.ndata--;
from->u.l.leaf_stats.dsize-= le_keylen(le) + le_latest_vallen(le); from->u.l.leaf_stats.dsize-= le_keylen(le) + le_latest_vallen(le);
assert(from->u.l.leaf_stats.ndata < 1U<<31); lazy_assert(from->u.l.leaf_stats.ndata < 1U<<31);
assert(from->u.l.leaf_stats.nkeys < 1U<<31); lazy_assert(from->u.l.leaf_stats.nkeys < 1U<<31);
//printf("%s:%d Removed %u get %lu\n", __FILE__, __LINE__, le_keylen(le)+ le_latest_vallen(le), from->u.l.leaf_stats.dsize); //printf("%s:%d Removed %u get %lu\n", __FILE__, __LINE__, le_keylen(le)+ le_latest_vallen(le), from->u.l.leaf_stats.dsize);
toku_mempool_mfree(&from->u.l.buffer_mempool, 0, le_size); toku_mempool_mfree(&from->u.l.buffer_mempool, 0, le_size);
} }
} }
assert(from->u.l.leaf_stats.dsize < 1U<<31); lazy_assert(from->u.l.leaf_stats.dsize < 1U<<31);
assert(toku_omt_size(a->u.l.buffer)>0); lazy_assert(toku_omt_size(a->u.l.buffer)>0);
{ {
LEAFENTRY le = fetch_from_buf(a->u.l.buffer, toku_omt_size(a->u.l.buffer)-1); LEAFENTRY le = fetch_from_buf(a->u.l.buffer, toku_omt_size(a->u.l.buffer)-1);
*splitk = kv_pair_malloc(le_key(le), le_keylen(le), 0, 0); *splitk = kv_pair_malloc(le_key(le), le_keylen(le), 0, 0);
...@@ -2062,7 +2062,7 @@ maybe_merge_pinned_nonleaf_nodes (BRTNODE parent, int childnum_of_parent, struct ...@@ -2062,7 +2062,7 @@ maybe_merge_pinned_nonleaf_nodes (BRTNODE parent, int childnum_of_parent, struct
BOOL *did_merge, BOOL *did_rebalance, struct kv_pair **splitk) BOOL *did_merge, BOOL *did_rebalance, struct kv_pair **splitk)
{ {
verify_local_fingerprint_nonleaf(a); verify_local_fingerprint_nonleaf(a);
assert(parent_splitk); lazy_assert(parent_splitk);
int old_n_children = a->u.n.n_children; int old_n_children = a->u.n.n_children;
int new_n_children = old_n_children + b->u.n.n_children; int new_n_children = old_n_children + b->u.n.n_children;
XREALLOC_N(new_n_children, a->u.n.childinfos); XREALLOC_N(new_n_children, a->u.n.childinfos);
...@@ -2131,7 +2131,7 @@ maybe_merge_pinned_nodes (BRTNODE parent, int childnum_of_parent, struct kv_pair ...@@ -2131,7 +2131,7 @@ maybe_merge_pinned_nodes (BRTNODE parent, int childnum_of_parent, struct kv_pair
// did_merge (OUT): Did the two nodes actually get merged? // did_merge (OUT): Did the two nodes actually get merged?
// splitk (OUT): If the two nodes did not get merged, the new pivot key between the two nodes. // splitk (OUT): If the two nodes did not get merged, the new pivot key between the two nodes.
{ {
assert(a->height == b->height); lazy_assert(a->height == b->height);
verify_local_fingerprint_nonleaf(a); verify_local_fingerprint_nonleaf(a);
parent->dirty = 1; // just to make sure parent->dirty = 1; // just to make sure
if (a->height == 0) { if (a->height == 0) {
...@@ -2157,11 +2157,11 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL ...@@ -2157,11 +2157,11 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL
childnuma = childnum_to_merge; childnuma = childnum_to_merge;
childnumb = childnum_to_merge+1; childnumb = childnum_to_merge+1;
} }
assert(0 <= childnuma); lazy_assert(0 <= childnuma);
assert(childnuma+1 == childnumb); lazy_assert(childnuma+1 == childnumb);
assert(childnumb < node->u.n.n_children); lazy_assert(childnumb < node->u.n.n_children);
assert(node->height>0); lazy_assert(node->height>0);
if (toku_fifo_n_entries(BNC_BUFFER(node,childnuma))>0) { if (toku_fifo_n_entries(BNC_BUFFER(node,childnuma))>0) {
enum reactivity re = RE_STABLE; enum reactivity re = RE_STABLE;
...@@ -2209,11 +2209,11 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL ...@@ -2209,11 +2209,11 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL
verify_local_fingerprint_nonleaf(childa); verify_local_fingerprint_nonleaf(childa);
r = maybe_merge_pinned_nodes(node, childnuma, node->u.n.childkeys[childnuma], childa, childb, &did_merge, &did_rebalance, &splitk_kvpair); r = maybe_merge_pinned_nodes(node, childnuma, node->u.n.childkeys[childnuma], childa, childb, &did_merge, &did_rebalance, &splitk_kvpair);
verify_local_fingerprint_nonleaf(childa); verify_local_fingerprint_nonleaf(childa);
if (childa->height>0) { int i; for (i=0; i+1<childa->u.n.n_children; i++) assert(childa->u.n.childkeys[i]); } if (childa->height>0) { int i; for (i=0; i+1<childa->u.n.n_children; i++) lazy_assert(childa->u.n.childkeys[i]); }
//(toku_verify_counts(childa), toku_verify_estimates(t,childa)); //(toku_verify_counts(childa), toku_verify_estimates(t,childa));
// the tree did react if a merge (did_merge) or rebalance (new spkit key) occurred // the tree did react if a merge (did_merge) or rebalance (new spkit key) occurred
*did_react = (BOOL)(did_merge || did_rebalance); *did_react = (BOOL)(did_merge || did_rebalance);
if (did_merge) assert(!splitk_kvpair); else assert(splitk_kvpair); if (did_merge) lazy_assert(!splitk_kvpair); else lazy_assert(splitk_kvpair);
if (r!=0) goto return_r; if (r!=0) goto return_r;
node->u.n.totalchildkeylens -= deleted_size; // The key was free()'d inside the maybe_merge_pinned_nodes. node->u.n.totalchildkeylens -= deleted_size; // The key was free()'d inside the maybe_merge_pinned_nodes.
...@@ -2232,13 +2232,13 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL ...@@ -2232,13 +2232,13 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL
(node->u.n.n_children-childnumb)*sizeof(node->u.n.childkeys[0])); (node->u.n.n_children-childnumb)*sizeof(node->u.n.childkeys[0]));
REALLOC_N(node->u.n.n_children-1, node->u.n.childkeys); REALLOC_N(node->u.n.n_children-1, node->u.n.childkeys);
fixup_child_fingerprint(node, childnuma, childa); fixup_child_fingerprint(node, childnuma, childa);
assert(node->u.n.childinfos[childnuma].blocknum.b == childa->thisnodename.b); lazy_assert(node->u.n.childinfos[childnuma].blocknum.b == childa->thisnodename.b);
verify_local_fingerprint_nonleaf(node); verify_local_fingerprint_nonleaf(node);
verify_local_fingerprint_nonleaf(childa); verify_local_fingerprint_nonleaf(childa);
childa->dirty = 1; // just to make sure childa->dirty = 1; // just to make sure
childb->dirty = 1; // just to make sure childb->dirty = 1; // just to make sure
} else { } else {
assert(splitk_kvpair); lazy_assert(splitk_kvpair);
// If we didn't merge the nodes, then we need the correct pivot. // If we didn't merge the nodes, then we need the correct pivot.
node->u.n.childkeys[childnuma] = splitk_kvpair; node->u.n.childkeys[childnuma] = splitk_kvpair;
node->u.n.totalchildkeylens += toku_brt_pivot_key_len(node->u.n.childkeys[childnuma]); node->u.n.totalchildkeylens += toku_brt_pivot_key_len(node->u.n.childkeys[childnuma]);
...@@ -2246,7 +2246,7 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL ...@@ -2246,7 +2246,7 @@ brt_merge_child (BRT t, BRTNODE node, int childnum_to_merge, BOOL *did_io, BOOL
node->dirty = 1; node->dirty = 1;
} }
} }
assert(node->dirty); lazy_assert(node->dirty);
return_r: return_r:
// Unpin both, and return the first nonzero error code that is found // Unpin both, and return the first nonzero error code that is found
{ {
...@@ -2316,7 +2316,7 @@ static void find_heaviest_child (BRTNODE node, int *childnum) { ...@@ -2316,7 +2316,7 @@ static void find_heaviest_child (BRTNODE node, int *childnum) {
int i; int i;
if (0) printf("%s:%d weights: %d", __FILE__, __LINE__, max_weight); if (0) printf("%s:%d weights: %d", __FILE__, __LINE__, max_weight);
assert(node->u.n.n_children>0); lazy_assert(node->u.n.n_children>0);
for (i=1; i<node->u.n.n_children; i++) { for (i=1; i<node->u.n.n_children; i++) {
int this_weight = BNC_NBYTESINBUF(node,i); int this_weight = BNC_NBYTESINBUF(node,i);
if (0) printf(" %d", this_weight); if (0) printf(" %d", this_weight);
...@@ -2334,7 +2334,7 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re, ...@@ -2334,7 +2334,7 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re,
// Effect: Push everything in the CHILDNUMth buffer of node down into the child. // Effect: Push everything in the CHILDNUMth buffer of node down into the child.
// The child could end up reactive, and this function doesn't fix that. // The child could end up reactive, and this function doesn't fix that.
{ {
assert(node->height>0); lazy_assert(node->height>0);
BLOCKNUM targetchild = BNC_BLOCKNUM(node, childnum); BLOCKNUM targetchild = BNC_BLOCKNUM(node, childnum);
toku_verify_blocknum_allocated(t->h->blocktable, targetchild); toku_verify_blocknum_allocated(t->h->blocktable, targetchild);
u_int32_t childfullhash = compute_child_fullhash(t->cf, node, childnum); u_int32_t childfullhash = compute_child_fullhash(t->cf, node, childnum);
...@@ -2346,7 +2346,7 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re, ...@@ -2346,7 +2346,7 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re,
if (r!=0) return r; if (r!=0) return r;
child = childnode_v; child = childnode_v;
} }
assert(child->thisnodename.b!=0); lazy_assert(child->thisnodename.b!=0);
VERIFY_NODE(t, child); VERIFY_NODE(t, child);
int r = 0; int r = 0;
...@@ -2354,7 +2354,7 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re, ...@@ -2354,7 +2354,7 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re,
bytevec key,val; bytevec key,val;
ITEMLEN keylen, vallen; ITEMLEN keylen, vallen;
//printf("%s:%d Try random_pick, weight=%d \n", __FILE__, __LINE__, BNC_NBYTESINBUF(node, childnum)); //printf("%s:%d Try random_pick, weight=%d \n", __FILE__, __LINE__, BNC_NBYTESINBUF(node, childnum));
assert(toku_fifo_n_entries(BNC_BUFFER(node,childnum))>0); lazy_assert(toku_fifo_n_entries(BNC_BUFFER(node,childnum))>0);
u_int32_t type; u_int32_t type;
XIDS xids; XIDS xids;
while(0==toku_fifo_peek(BNC_BUFFER(node,childnum), &key, &keylen, &val, &vallen, &type, &xids)) { while(0==toku_fifo_peek(BNC_BUFFER(node,childnum), &key, &keylen, &val, &vallen, &type, &xids)) {
...@@ -2401,10 +2401,10 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re, ...@@ -2401,10 +2401,10 @@ flush_this_child (BRT t, BRTNODE node, int childnum, enum reactivity *child_re,
static int static int
flush_some_child (BRT t, BRTNODE node, enum reactivity re_array[], BOOL *did_io) flush_some_child (BRT t, BRTNODE node, enum reactivity re_array[], BOOL *did_io)
{ {
assert(node->height>0); lazy_assert(node->height>0);
int childnum; int childnum;
find_heaviest_child(node, &childnum); find_heaviest_child(node, &childnum);
assert(toku_fifo_n_entries(BNC_BUFFER(node, childnum))>0); lazy_assert(toku_fifo_n_entries(BNC_BUFFER(node, childnum))>0);
return flush_this_child (t, node, childnum, &re_array[childnum], did_io); return flush_this_child (t, node, childnum, &re_array[childnum], did_io);
} }
...@@ -2496,7 +2496,7 @@ static int push_something_at_root (BRT brt, BRTNODE *nodep, CACHEKEY *rootp, BRT ...@@ -2496,7 +2496,7 @@ static int push_something_at_root (BRT brt, BRTNODE *nodep, CACHEKEY *rootp, BRT
static void compute_and_fill_remembered_hash (BRT brt) { static void compute_and_fill_remembered_hash (BRT brt) {
struct remembered_hash *rh = &brt->h->root_hash; struct remembered_hash *rh = &brt->h->root_hash;
assert(brt->cf); // if cf is null, we'll be hosed. lazy_assert(brt->cf); // if cf is null, we'll be hosed.
rh->valid = TRUE; rh->valid = TRUE;
rh->fnum=toku_cachefile_filenum(brt->cf); rh->fnum=toku_cachefile_filenum(brt->cf);
rh->root=brt->h->root; rh->root=brt->h->root;
...@@ -2507,7 +2507,7 @@ static u_int32_t get_roothash (BRT brt) { ...@@ -2507,7 +2507,7 @@ static u_int32_t get_roothash (BRT brt) {
struct remembered_hash *rh = &brt->h->root_hash; struct remembered_hash *rh = &brt->h->root_hash;
BLOCKNUM root = brt->h->root; BLOCKNUM root = brt->h->root;
// compare cf first, since cf is NULL for invalid entries. // compare cf first, since cf is NULL for invalid entries.
assert(rh); lazy_assert(rh);
//printf("v=%d\n", rh->valid); //printf("v=%d\n", rh->valid);
if (rh->valid) { if (rh->valid) {
//printf("f=%d\n", rh->fnum.fileid); //printf("f=%d\n", rh->fnum.fileid);
...@@ -2532,13 +2532,13 @@ int toku_brt_root_put_cmd(BRT brt, BRT_MSG cmd) ...@@ -2532,13 +2532,13 @@ int toku_brt_root_put_cmd(BRT brt, BRT_MSG cmd)
BRTNODE node; BRTNODE node;
CACHEKEY *rootp; CACHEKEY *rootp;
int r; int r;
//assert(0==toku_cachetable_assert_all_unpinned(brt->cachetable)); //lazy_assert(0==toku_cachetable_assert_all_unpinned(brt->cachetable));
assert(brt->h); lazy_assert(brt->h);
brt->h->root_put_counter = global_root_put_counter++; brt->h->root_put_counter = global_root_put_counter++;
u_int32_t fullhash; u_int32_t fullhash;
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt, &fullhash);
//assert(fullhash==toku_cachetable_hash(brt->cf, *rootp)); //lazy_assert(fullhash==toku_cachetable_hash(brt->cf, *rootp));
if ((r=toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash, &node_v, NULL, if ((r=toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash, &node_v, NULL,
toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h))) { toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h))) {
return r; return r;
...@@ -2547,7 +2547,7 @@ int toku_brt_root_put_cmd(BRT brt, BRT_MSG cmd) ...@@ -2547,7 +2547,7 @@ int toku_brt_root_put_cmd(BRT brt, BRT_MSG cmd)
node=node_v; node=node_v;
VERIFY_NODE(brt, node); VERIFY_NODE(brt, node);
assert(node->fullhash==fullhash); lazy_assert(node->fullhash==fullhash);
brt_verify_flags(brt, node); brt_verify_flags(brt, node);
verify_local_fingerprint_nonleaf(node); verify_local_fingerprint_nonleaf(node);
...@@ -2557,7 +2557,7 @@ int toku_brt_root_put_cmd(BRT brt, BRT_MSG cmd) ...@@ -2557,7 +2557,7 @@ int toku_brt_root_put_cmd(BRT brt, BRT_MSG cmd)
} }
verify_local_fingerprint_nonleaf(node); verify_local_fingerprint_nonleaf(node);
r = toku_unpin_brtnode(brt, node); r = toku_unpin_brtnode(brt, node);
assert(r == 0); lazy_assert(r == 0);
return 0; return 0;
} }
...@@ -2586,7 +2586,7 @@ int toku_brt_insert (BRT brt, DBT *key, DBT *val, TOKUTXN txn) { ...@@ -2586,7 +2586,7 @@ int toku_brt_insert (BRT brt, DBT *key, DBT *val, TOKUTXN txn) {
int int
toku_brt_load_recovery(TOKUTXN txn, char const * old_iname, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn) { toku_brt_load_recovery(TOKUTXN txn, char const * old_iname, char const * new_iname, int do_fsync, int do_log, LSN *load_lsn) {
int r = 0; int r = 0;
assert(txn); lazy_assert(txn);
toku_txn_force_fsync_on_commit(txn); //If the txn commits, the commit MUST be in the log toku_txn_force_fsync_on_commit(txn); //If the txn commits, the commit MUST be in the log
//before the (old) file is actually unlinked //before the (old) file is actually unlinked
TOKULOGGER logger = toku_txn_logger(txn); TOKULOGGER logger = toku_txn_logger(txn);
...@@ -2641,8 +2641,8 @@ toku_brt_load(BRT brt, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *l ...@@ -2641,8 +2641,8 @@ toku_brt_load(BRT brt, TOKUTXN txn, char const * new_iname, int do_fsync, LSN *l
int int
toku_brt_log_put_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, const DBT *key, const DBT *val) { toku_brt_log_put_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, const DBT *key, const DBT *val) {
int r = 0; int r = 0;
assert(txn); lazy_assert(txn);
assert(num_brts > 0); lazy_assert(num_brts > 0);
TOKULOGGER logger = toku_txn_logger(txn); TOKULOGGER logger = toku_txn_logger(txn);
if (logger) { if (logger) {
FILENUM fnums[num_brts]; FILENUM fnums[num_brts];
...@@ -2667,7 +2667,7 @@ toku_brt_log_put_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, co ...@@ -2667,7 +2667,7 @@ toku_brt_log_put_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, co
} }
int toku_brt_maybe_insert (BRT brt, DBT *key, DBT *val, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, int do_logging, enum brt_msg_type type) { int toku_brt_maybe_insert (BRT brt, DBT *key, DBT *val, TOKUTXN txn, BOOL oplsn_valid, LSN oplsn, int do_logging, enum brt_msg_type type) {
assert(type==BRT_INSERT || type==BRT_INSERT_NO_OVERWRITE); lazy_assert(type==BRT_INSERT || type==BRT_INSERT_NO_OVERWRITE);
int r = 0; int r = 0;
XIDS message_xids = xids_get_root_xids(); //By default use committed messages XIDS message_xids = xids_get_root_xids(); //By default use committed messages
TXNID xid = toku_txn_get_txnid(txn); TXNID xid = toku_txn_get_txnid(txn);
...@@ -2717,8 +2717,8 @@ int toku_brt_delete(BRT brt, DBT *key, TOKUTXN txn) { ...@@ -2717,8 +2717,8 @@ int toku_brt_delete(BRT brt, DBT *key, TOKUTXN txn) {
int int
toku_brt_log_del_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, const DBT *key, const DBT *val) { toku_brt_log_del_multiple (TOKUTXN txn, BRT src_brt, BRT *brts, int num_brts, const DBT *key, const DBT *val) {
int r = 0; int r = 0;
assert(txn); lazy_assert(txn);
assert(num_brts > 0); lazy_assert(num_brts > 0);
TOKULOGGER logger = toku_txn_logger(txn); TOKULOGGER logger = toku_txn_logger(txn);
if (logger) { if (logger) {
FILENUM fnums[num_brts]; FILENUM fnums[num_brts];
...@@ -2790,7 +2790,7 @@ static int move_it (OMTVALUE lev, u_int32_t idx, void *v) { ...@@ -2790,7 +2790,7 @@ static int move_it (OMTVALUE lev, u_int32_t idx, void *v) {
struct omt_compressor_state *oc = v; struct omt_compressor_state *oc = v;
u_int32_t size = leafentry_memsize(le); u_int32_t size = leafentry_memsize(le);
LEAFENTRY newdata = toku_mempool_malloc(oc->new_kvspace, size, 1); LEAFENTRY newdata = toku_mempool_malloc(oc->new_kvspace, size, 1);
assert(newdata); // we do this on a fresh mempool, so nothing bad shouldhapepn lazy_assert(newdata); // we do this on a fresh mempool, so nothing bad shouldhapepn
memcpy(newdata, le, size); memcpy(newdata, le, size);
toku_omt_set_at(oc->omt, newdata, idx); toku_omt_set_at(oc->omt, newdata, idx);
return 0; return 0;
...@@ -2825,7 +2825,7 @@ mempool_malloc_from_omt(OMT omt, struct mempool *mp, size_t size, void **maybe_f ...@@ -2825,7 +2825,7 @@ mempool_malloc_from_omt(OMT omt, struct mempool *mp, size_t size, void **maybe_f
if (v==0) { if (v==0) {
if (0 == omt_compress_kvspace(omt, mp, size, maybe_free)) { if (0 == omt_compress_kvspace(omt, mp, size, maybe_free)) {
v = toku_mempool_malloc(mp, size, 1); v = toku_mempool_malloc(mp, size, 1);
assert(v); lazy_assert(v);
} }
} }
return v; return v;
...@@ -2843,8 +2843,8 @@ int toku_open_brt (const char *fname, int is_create, BRT *newbrt, int nodesize, ...@@ -2843,8 +2843,8 @@ int toku_open_brt (const char *fname, int is_create, BRT *newbrt, int nodesize,
r = toku_brt_create(&brt); r = toku_brt_create(&brt);
if (r != 0) if (r != 0)
return r; return r;
r = toku_brt_set_nodesize(brt, nodesize); assert(r==0); r = toku_brt_set_nodesize(brt, nodesize); lazy_assert_zero(r);
r = toku_brt_set_bt_compare(brt, compare_fun); assert(r==0); r = toku_brt_set_bt_compare(brt, compare_fun); lazy_assert_zero(r);
r = toku_brt_open(brt, fname, is_create, only_create, cachetable, txn, db); r = toku_brt_open(brt, fname, is_create, only_create, cachetable, txn, db);
if (r != 0) { if (r != 0) {
...@@ -2858,7 +2858,7 @@ int toku_open_brt (const char *fname, int is_create, BRT *newbrt, int nodesize, ...@@ -2858,7 +2858,7 @@ int toku_open_brt (const char *fname, int is_create, BRT *newbrt, int nodesize,
static int setup_initial_brt_root_node (BRT t, BLOCKNUM blocknum) { static int setup_initial_brt_root_node (BRT t, BLOCKNUM blocknum) {
int r; int r;
BRTNODE MALLOC(node); BRTNODE MALLOC(node);
assert(node); lazy_assert(node);
node->ever_been_written = 0; node->ever_been_written = 0;
//printf("%s:%d\n", __FILE__, __LINE__); //printf("%s:%d\n", __FILE__, __LINE__);
initialize_empty_brtnode(t, node, blocknum, 0, 0); initialize_empty_brtnode(t, node, blocknum, 0, 0);
...@@ -2894,7 +2894,7 @@ static int brt_create_file(BRT brt, const char *fname, int *fdp) { ...@@ -2894,7 +2894,7 @@ static int brt_create_file(BRT brt, const char *fname, int *fdp) {
int r; int r;
int fd; int fd;
fd = open(fname, O_RDWR | O_BINARY, mode); fd = open(fname, O_RDWR | O_BINARY, mode);
assert(fd==-1); lazy_assert(fd==-1);
if (errno != ENOENT) { if (errno != ENOENT) {
r = errno; r = errno;
return r; return r;
...@@ -2906,7 +2906,7 @@ static int brt_create_file(BRT brt, const char *fname, int *fdp) { ...@@ -2906,7 +2906,7 @@ static int brt_create_file(BRT brt, const char *fname, int *fdp) {
} }
r = toku_fsync_directory(fname); r = toku_fsync_directory(fname);
resource_assert(r == 0); resource_assert_zero(r);
*fdp = fd; *fdp = fd;
return 0; return 0;
...@@ -2920,7 +2920,7 @@ static int brt_open_file(const char *fname, int *fdp) { ...@@ -2920,7 +2920,7 @@ static int brt_open_file(const char *fname, int *fdp) {
fd = open(fname, O_RDWR | O_BINARY, mode); fd = open(fname, O_RDWR | O_BINARY, mode);
if (fd==-1) { if (fd==-1) {
r = errno; r = errno;
assert(r!=0); lazy_assert(r!=0);
return r; return r;
} }
*fdp = fd; *fdp = fd;
...@@ -2961,7 +2961,7 @@ static int ...@@ -2961,7 +2961,7 @@ static int
brt_init_header_partial (BRT t, TOKUTXN txn) { brt_init_header_partial (BRT t, TOKUTXN txn) {
int r; int r;
t->h->flags = t->flags; t->h->flags = t->flags;
if (t->h->cf!=NULL) assert(t->h->cf == t->cf); if (t->h->cf!=NULL) lazy_assert(t->h->cf == t->cf);
t->h->cf = t->cf; t->h->cf = t->cf;
t->h->nodesize=t->nodesize; t->h->nodesize=t->nodesize;
t->h->num_blocks_to_upgrade = 0; t->h->num_blocks_to_upgrade = 0;
...@@ -3161,9 +3161,9 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3161,9 +3161,9 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
WHEN_BRTTRACE(fprintf(stderr, "BRTTRACE: %s:%d toku_brt_open(%s, \"%s\", %d, %p, %d, %p)\n", WHEN_BRTTRACE(fprintf(stderr, "BRTTRACE: %s:%d toku_brt_open(%s, \"%s\", %d, %p, %d, %p)\n",
__FILE__, __LINE__, fname_in_env, dbname, is_create, newbrt, nodesize, cachetable)); __FILE__, __LINE__, fname_in_env, dbname, is_create, newbrt, nodesize, cachetable));
char *fname_in_cwd = toku_cachetable_get_fname_in_cwd(cachetable, fname_in_env); char *fname_in_cwd = toku_cachetable_get_fname_in_cwd(cachetable, fname_in_env);
if (0) { died0: if (fname_in_cwd) toku_free(fname_in_cwd); assert(r); return r; } if (0) { died0: if (fname_in_cwd) toku_free(fname_in_cwd); lazy_assert(r); return r; }
assert(is_create || !only_create); lazy_assert(is_create || !only_create);
t->db = db; t->db = db;
BOOL did_create = FALSE; BOOL did_create = FALSE;
FILENUM reserved_filenum = use_filenum; FILENUM reserved_filenum = use_filenum;
...@@ -3179,7 +3179,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3179,7 +3179,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
toku_cachetable_unreserve_filenum(cachetable, reserved_filenum); toku_cachetable_unreserve_filenum(cachetable, reserved_filenum);
goto died0; goto died0;
} }
if (use_reserved_filenum) assert(reserved_filenum.fileid == use_filenum.fileid); if (use_reserved_filenum) lazy_assert(reserved_filenum.fileid == use_filenum.fileid);
did_create = TRUE; did_create = TRUE;
mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO; mode_t mode = S_IRWXU|S_IRWXG|S_IRWXO;
if (txn) { if (txn) {
...@@ -3206,7 +3206,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3206,7 +3206,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
toku_cachefile_close(&t->cf, 0, FALSE, ZERO_LSN); toku_cachefile_close(&t->cf, 0, FALSE, ZERO_LSN);
goto died1; goto died1;
} }
assert(t->nodesize>0); lazy_assert(t->nodesize>0);
//printf("%s:%d %d alloced\n", __FILE__, __LINE__, get_n_items_malloced()); toku_print_malloced_items(); //printf("%s:%d %d alloced\n", __FILE__, __LINE__, get_n_items_malloced()); toku_print_malloced_items();
if (0) { if (0) {
died_after_read_and_pin: died_after_read_and_pin:
...@@ -3223,7 +3223,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3223,7 +3223,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
goto died_after_read_and_pin; goto died_after_read_and_pin;
} }
else if (only_create) { else if (only_create) {
assert(r==0); lazy_assert_zero(r);
r = EEXIST; r = EEXIST;
goto died_after_read_and_pin; goto died_after_read_and_pin;
} }
...@@ -3270,11 +3270,11 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3270,11 +3270,11 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
else { else {
// dict_id is already in header // dict_id is already in header
if (use_reserved_dict_id) if (use_reserved_dict_id)
assert(t->h->dict_id.dictid == use_dictionary_id.dictid); lazy_assert(t->h->dict_id.dictid == use_dictionary_id.dictid);
} }
assert(t->h); lazy_assert(t->h);
assert(t->h->dict_id.dictid != DICTIONARY_ID_NONE.dictid); lazy_assert(t->h->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
assert(t->h->dict_id.dictid < dict_id_serial); lazy_assert(t->h->dict_id.dictid < dict_id_serial);
r = toku_maybe_upgrade_brt(t); // possibly do some work to complete the version upgrade of brt r = toku_maybe_upgrade_brt(t); // possibly do some work to complete the version upgrade of brt
if (r!=0) goto died_after_read_and_pin; if (r!=0) goto died_after_read_and_pin;
...@@ -3284,10 +3284,10 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3284,10 +3284,10 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
if (r!=0) goto died_after_read_and_pin; if (r!=0) goto died_after_read_and_pin;
if (t->db) t->db->descriptor = &t->h->descriptor; if (t->db) t->db->descriptor = &t->h->descriptor;
if (txn_created) { if (txn_created) {
assert(txn); lazy_assert(txn);
toku_brt_header_suppress_rollbacks(t->h, txn); toku_brt_header_suppress_rollbacks(t->h, txn);
r = toku_txn_note_brt(txn, t); r = toku_txn_note_brt(txn, t);
assert(r==0); lazy_assert_zero(r);
} }
//Opening a brt may restore to previous checkpoint. Truncate if necessary. //Opening a brt may restore to previous checkpoint. Truncate if necessary.
...@@ -3304,7 +3304,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET ...@@ -3304,7 +3304,7 @@ brt_open(BRT t, const char *fname_in_env, int is_create, int only_create, CACHET
int int
toku_brt_open_recovery(BRT t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, DB *db, FILENUM use_filenum) { toku_brt_open_recovery(BRT t, const char *fname_in_env, int is_create, int only_create, CACHETABLE cachetable, TOKUTXN txn, DB *db, FILENUM use_filenum) {
int r; int r;
assert(use_filenum.fileid != FILENUM_NONE.fileid); lazy_assert(use_filenum.fileid != FILENUM_NONE.fileid);
r = brt_open(t, fname_in_env, is_create, only_create, cachetable, r = brt_open(t, fname_in_env, is_create, only_create, cachetable,
txn, db, use_filenum, DICTIONARY_ID_NONE); txn, db, use_filenum, DICTIONARY_ID_NONE);
return r; return r;
...@@ -3324,25 +3324,25 @@ brt_open_for_redirect(BRT *new_brtp, const char *fname_in_env, TOKUTXN txn, BRT ...@@ -3324,25 +3324,25 @@ brt_open_for_redirect(BRT *new_brtp, const char *fname_in_env, TOKUTXN txn, BRT
int r; int r;
BRT t; BRT t;
struct brt_header *old_h = old_brt->h; struct brt_header *old_h = old_brt->h;
assert(old_h->dict_id.dictid != DICTIONARY_ID_NONE.dictid); lazy_assert(old_h->dict_id.dictid != DICTIONARY_ID_NONE.dictid);
r = toku_brt_create(&t); r = toku_brt_create(&t);
assert(r==0); lazy_assert_zero(r);
r = toku_brt_set_bt_compare(t, old_brt->compare_fun); r = toku_brt_set_bt_compare(t, old_brt->compare_fun);
assert(r==0); lazy_assert_zero(r);
r = toku_brt_set_nodesize(t, old_brt->nodesize); r = toku_brt_set_nodesize(t, old_brt->nodesize);
assert(r==0); lazy_assert_zero(r);
if (old_h->descriptor.version>0) { if (old_h->descriptor.version>0) {
r = toku_brt_set_descriptor(t, old_h->descriptor.version, &old_h->descriptor.dbt); r = toku_brt_set_descriptor(t, old_h->descriptor.version, &old_h->descriptor.dbt);
assert(r==0); lazy_assert_zero(r);
} }
CACHETABLE ct = toku_cachefile_get_cachetable(old_brt->cf); CACHETABLE ct = toku_cachefile_get_cachetable(old_brt->cf);
r = brt_open(t, fname_in_env, 0, 0, ct, txn, old_brt->db, FILENUM_NONE, old_h->dict_id); r = brt_open(t, fname_in_env, 0, 0, ct, txn, old_brt->db, FILENUM_NONE, old_h->dict_id);
assert(r==0); lazy_assert_zero(r);
if (old_h->descriptor.version==0) { if (old_h->descriptor.version==0) {
assert(t->h->descriptor.version == 0); lazy_assert(t->h->descriptor.version == 0);
} }
assert(t->h->dict_id.dictid == old_h->dict_id.dictid); lazy_assert(t->h->dict_id.dictid == old_h->dict_id.dictid);
assert(t->db == old_brt->db); lazy_assert(t->db == old_brt->db);
*new_brtp = t; *new_brtp = t;
return r; return r;
...@@ -3356,7 +3356,7 @@ static void (*callback_db_set_brt)(DB *db, BRT brt) = NULL; ...@@ -3356,7 +3356,7 @@ static void (*callback_db_set_brt)(DB *db, BRT brt) = NULL;
static void static void
brt_redirect_cursors (BRT brt_to, BRT brt_from) { brt_redirect_cursors (BRT brt_to, BRT brt_from) {
assert(brt_to->db == brt_from->db); lazy_assert(brt_to->db == brt_from->db);
while (!toku_list_empty(&brt_from->cursors)) { while (!toku_list_empty(&brt_from->cursors)) {
struct toku_list * c_list = toku_list_head(&brt_from->cursors); struct toku_list * c_list = toku_list_head(&brt_from->cursors);
BRT_CURSOR c = toku_list_struct(c_list, struct brt_cursor, cursors_link); BRT_CURSOR c = toku_list_struct(c_list, struct brt_cursor, cursors_link);
...@@ -3372,7 +3372,7 @@ brt_redirect_cursors (BRT brt_to, BRT brt_from) { ...@@ -3372,7 +3372,7 @@ brt_redirect_cursors (BRT brt_to, BRT brt_from) {
static void static void
brt_redirect_db (BRT brt_to, BRT brt_from) { brt_redirect_db (BRT brt_to, BRT brt_from) {
assert(brt_to->db == brt_from->db); lazy_assert(brt_to->db == brt_from->db);
callback_db_set_brt(brt_from->db, brt_to); callback_db_set_brt(brt_from->db, brt_to);
} }
...@@ -3381,8 +3381,8 @@ fake_db_brt_close_delayed(DB *db, u_int32_t UU(flags)) { ...@@ -3381,8 +3381,8 @@ fake_db_brt_close_delayed(DB *db, u_int32_t UU(flags)) {
BRT brt_to_close = db->api_internal; BRT brt_to_close = db->api_internal;
char *error_string = NULL; char *error_string = NULL;
int r = toku_close_brt(brt_to_close, &error_string); int r = toku_close_brt(brt_to_close, &error_string);
assert(r==0); lazy_assert_zero(r);
assert(error_string == NULL); lazy_assert(error_string == NULL);
toku_free(db); toku_free(db);
return 0; return 0;
} }
...@@ -3397,23 +3397,23 @@ toku_brt_header_close_redirected_brts(struct brt_header * h) { ...@@ -3397,23 +3397,23 @@ toku_brt_header_close_redirected_brts(struct brt_header * h) {
for (list = h->live_brts.next; list != &h->live_brts; list = list->next) { for (list = h->live_brts.next; list != &h->live_brts; list = list->next) {
num_brts++; num_brts++;
} }
assert(num_brts>0); lazy_assert(num_brts>0);
BRT brts[num_brts]; BRT brts[num_brts];
DB *dbs[num_brts]; DB *dbs[num_brts];
int which = 0; int which = 0;
for (list = h->live_brts.next; list != &h->live_brts; list = list->next) { for (list = h->live_brts.next; list != &h->live_brts; list = list->next) {
XCALLOC(dbs[which]); XCALLOC(dbs[which]);
brts[which] = toku_list_struct(list, struct brt, live_brt_link); brts[which] = toku_list_struct(list, struct brt, live_brt_link);
assert(!brts[which]->was_closed); lazy_assert(!brts[which]->was_closed);
dbs[which]->api_internal = brts[which]; dbs[which]->api_internal = brts[which];
brts[which]->db = dbs[which]; brts[which]->db = dbs[which];
which++; which++;
} }
assert(which == num_brts); lazy_assert(which == num_brts);
for (which = 0; which < num_brts; which++) { for (which = 0; which < num_brts; which++) {
int r; int r;
r = toku_brt_db_delay_closed(brts[which], dbs[which], fake_db_brt_close_delayed, 0); r = toku_brt_db_delay_closed(brts[which], dbs[which], fake_db_brt_close_delayed, 0);
assert(r==0); lazy_assert_zero(r);
} }
return 0; return 0;
} }
...@@ -3426,8 +3426,8 @@ static int ...@@ -3426,8 +3426,8 @@ static int
dictionary_redirect_internal(const char *dst_fname_in_env, struct brt_header *src_h, TOKUTXN txn, struct brt_header **dst_hp) { dictionary_redirect_internal(const char *dst_fname_in_env, struct brt_header *src_h, TOKUTXN txn, struct brt_header **dst_hp) {
int r; int r;
assert(toku_list_empty(&src_h->zombie_brts)); lazy_assert(toku_list_empty(&src_h->zombie_brts));
assert(!toku_list_empty(&src_h->live_brts)); lazy_assert(!toku_list_empty(&src_h->live_brts));
FILENUM src_filenum = toku_cachefile_filenum(src_h->cf); FILENUM src_filenum = toku_cachefile_filenum(src_h->cf);
FILENUM dst_filenum = FILENUM_NONE; FILENUM dst_filenum = FILENUM_NONE;
...@@ -3437,25 +3437,25 @@ dictionary_redirect_internal(const char *dst_fname_in_env, struct brt_header *sr ...@@ -3437,25 +3437,25 @@ dictionary_redirect_internal(const char *dst_fname_in_env, struct brt_header *sr
for (list = src_h->live_brts.next; list != &src_h->live_brts; list = list->next) { for (list = src_h->live_brts.next; list != &src_h->live_brts; list = list->next) {
BRT src_brt; BRT src_brt;
src_brt = toku_list_struct(list, struct brt, live_brt_link); src_brt = toku_list_struct(list, struct brt, live_brt_link);
assert(!src_brt->was_closed); lazy_assert(!src_brt->was_closed);
BRT dst_brt; BRT dst_brt;
r = brt_open_for_redirect(&dst_brt, dst_fname_in_env, txn, src_brt); r = brt_open_for_redirect(&dst_brt, dst_fname_in_env, txn, src_brt);
assert(r==0); lazy_assert_zero(r);
if (dst_filenum.fileid==FILENUM_NONE.fileid) { // if first time through loop if (dst_filenum.fileid==FILENUM_NONE.fileid) { // if first time through loop
dst_filenum = toku_cachefile_filenum(dst_brt->cf); dst_filenum = toku_cachefile_filenum(dst_brt->cf);
assert(dst_filenum.fileid!=FILENUM_NONE.fileid); lazy_assert(dst_filenum.fileid!=FILENUM_NONE.fileid);
assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file. lazy_assert(dst_filenum.fileid!=src_filenum.fileid); //Cannot be same file.
} }
else { // All dst_brts must have same filenum else { // All dst_brts must have same filenum
assert(dst_filenum.fileid == toku_cachefile_filenum(dst_brt->cf).fileid); lazy_assert(dst_filenum.fileid == toku_cachefile_filenum(dst_brt->cf).fileid);
} }
if (!dst_h) dst_h = dst_brt->h; if (!dst_h) dst_h = dst_brt->h;
else assert(dst_h == dst_brt->h); else lazy_assert(dst_h == dst_brt->h);
//Do not need to swap descriptors pointers. //Do not need to swap descriptors pointers.
//Done by brt_open_for_redirect //Done by brt_open_for_redirect
assert(dst_brt->db->descriptor == &dst_brt->h->descriptor); lazy_assert(dst_brt->db->descriptor == &dst_brt->h->descriptor);
//Set db->i->brt to new brt //Set db->i->brt to new brt
brt_redirect_db(dst_brt, src_brt); brt_redirect_db(dst_brt, src_brt);
...@@ -3463,10 +3463,10 @@ dictionary_redirect_internal(const char *dst_fname_in_env, struct brt_header *sr ...@@ -3463,10 +3463,10 @@ dictionary_redirect_internal(const char *dst_fname_in_env, struct brt_header *sr
//Move cursors. //Move cursors.
brt_redirect_cursors (dst_brt, src_brt); brt_redirect_cursors (dst_brt, src_brt);
} }
assert(dst_h); lazy_assert(dst_h);
r = toku_brt_header_close_redirected_brts(src_h); r = toku_brt_header_close_redirected_brts(src_h);
assert(r==0); lazy_assert_zero(r);
*dst_hp = dst_h; *dst_hp = dst_h;
return r; return r;
...@@ -3484,28 +3484,28 @@ toku_dictionary_redirect_abort(struct brt_header *old_h, struct brt_header *new_ ...@@ -3484,28 +3484,28 @@ toku_dictionary_redirect_abort(struct brt_header *old_h, struct brt_header *new_
{ {
FILENUM old_filenum = toku_cachefile_filenum(old_h->cf); FILENUM old_filenum = toku_cachefile_filenum(old_h->cf);
FILENUM new_filenum = toku_cachefile_filenum(new_h->cf); FILENUM new_filenum = toku_cachefile_filenum(new_h->cf);
assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file. lazy_assert(old_filenum.fileid!=new_filenum.fileid); //Cannot be same file.
//No living brts in old header. //No living brts in old header.
assert(toku_list_empty(&old_h->live_brts)); lazy_assert(toku_list_empty(&old_h->live_brts));
//Must have a zombie in old header. //Must have a zombie in old header.
assert(!toku_list_empty(&old_h->zombie_brts)); lazy_assert(!toku_list_empty(&old_h->zombie_brts));
} }
// If application did not close all DBs using the new file, then there should // If application did not close all DBs using the new file, then there should
// be no zombies and we need to redirect the DBs back to the original file. // be no zombies and we need to redirect the DBs back to the original file.
if (!toku_list_empty(&new_h->live_brts)) { if (!toku_list_empty(&new_h->live_brts)) {
assert(toku_list_empty(&new_h->zombie_brts)); lazy_assert(toku_list_empty(&new_h->zombie_brts));
struct brt_header *dst_h; struct brt_header *dst_h;
// redirect back from new_h to old_h // redirect back from new_h to old_h
r = dictionary_redirect_internal(old_fname_in_env, new_h, txn, &dst_h); r = dictionary_redirect_internal(old_fname_in_env, new_h, txn, &dst_h);
assert(r==0); lazy_assert_zero(r);
assert(dst_h == old_h); lazy_assert(dst_h == old_h);
} }
else { else {
//No live brts. Zombies on both sides will die on their own eventually. //No live brts. Zombies on both sides will die on their own eventually.
//No need to redirect back. //No need to redirect back.
assert(!toku_list_empty(&new_h->zombie_brts)); lazy_assert(!toku_list_empty(&new_h->zombie_brts));
r = 0; r = 0;
} }
return r; return r;
...@@ -3562,39 +3562,39 @@ toku_dictionary_redirect (const char *dst_fname_in_env, BRT old_brt, TOKUTXN txn ...@@ -3562,39 +3562,39 @@ toku_dictionary_redirect (const char *dst_fname_in_env, BRT old_brt, TOKUTXN txn
r = EINVAL; r = EINVAL;
goto cleanup; goto cleanup;
} }
assert(r==ENOENT); lazy_assert(r==ENOENT);
r = 0; r = 0;
} }
if (txn) { if (txn) {
r = toku_txn_note_brt(txn, old_brt); // mark old brt as touched by this txn r = toku_txn_note_brt(txn, old_brt); // mark old brt as touched by this txn
assert(r==0); lazy_assert_zero(r);
} }
struct brt_header *new_h; struct brt_header *new_h;
r = dictionary_redirect_internal(dst_fname_in_env, old_h, txn, &new_h); r = dictionary_redirect_internal(dst_fname_in_env, old_h, txn, &new_h);
assert(r==0); lazy_assert_zero(r);
// make rollback log entry // make rollback log entry
if (txn) { if (txn) {
assert(toku_list_empty(&new_h->zombie_brts)); lazy_assert(toku_list_empty(&new_h->zombie_brts));
assert(!toku_list_empty(&new_h->live_brts)); lazy_assert(!toku_list_empty(&new_h->live_brts));
struct toku_list *list; struct toku_list *list;
for (list = new_h->live_brts.next; list != &new_h->live_brts; list = list->next) { for (list = new_h->live_brts.next; list != &new_h->live_brts; list = list->next) {
BRT new_brt; BRT new_brt;
new_brt = toku_list_struct(list, struct brt, live_brt_link); new_brt = toku_list_struct(list, struct brt, live_brt_link);
r = toku_txn_note_brt(txn, new_brt); // mark new brt as touched by this txn r = toku_txn_note_brt(txn, new_brt); // mark new brt as touched by this txn
assert(r==0); lazy_assert_zero(r);
} }
FILENUM old_filenum = toku_cachefile_filenum(old_h->cf); FILENUM old_filenum = toku_cachefile_filenum(old_h->cf);
FILENUM new_filenum = toku_cachefile_filenum(new_h->cf); FILENUM new_filenum = toku_cachefile_filenum(new_h->cf);
r = toku_logger_save_rollback_dictionary_redirect(txn, old_filenum, new_filenum); r = toku_logger_save_rollback_dictionary_redirect(txn, old_filenum, new_filenum);
assert(r==0); lazy_assert_zero(r);
TXNID xid = toku_txn_get_txnid(txn); TXNID xid = toku_txn_get_txnid(txn);
toku_brt_header_suppress_rollbacks(new_h, txn); toku_brt_header_suppress_rollbacks(new_h, txn);
r = toku_log_suppress_rollback(txn->logger, NULL, 0, new_filenum, xid); r = toku_log_suppress_rollback(txn->logger, NULL, 0, new_filenum, xid);
assert(r==0); lazy_assert_zero(r);
} }
cleanup: cleanup:
...@@ -3610,7 +3610,7 @@ toku_brt_get_dictionary_id(BRT brt) { ...@@ -3610,7 +3610,7 @@ toku_brt_get_dictionary_id(BRT brt) {
} }
int toku_brt_set_flags(BRT brt, unsigned int flags) { int toku_brt_set_flags(BRT brt, unsigned int flags) {
assert(flags==(flags&TOKU_DB_KEYCMP_BUILTIN)); // make sure there are no extranious flags lazy_assert(flags==(flags&TOKU_DB_KEYCMP_BUILTIN)); // make sure there are no extranious flags
brt->did_set_flags = TRUE; brt->did_set_flags = TRUE;
brt->flags = flags; brt->flags = flags;
return 0; return 0;
...@@ -3618,7 +3618,7 @@ int toku_brt_set_flags(BRT brt, unsigned int flags) { ...@@ -3618,7 +3618,7 @@ int toku_brt_set_flags(BRT brt, unsigned int flags) {
int toku_brt_get_flags(BRT brt, unsigned int *flags) { int toku_brt_get_flags(BRT brt, unsigned int *flags) {
*flags = brt->flags; *flags = brt->flags;
assert(brt->flags==(brt->flags&TOKU_DB_KEYCMP_BUILTIN)); // make sure there are no extraneous flags lazy_assert(brt->flags==(brt->flags&TOKU_DB_KEYCMP_BUILTIN)); // make sure there are no extraneous flags
return 0; return 0;
} }
...@@ -3664,8 +3664,8 @@ toku_brtheader_begin_checkpoint (CACHEFILE UU(cachefile), int UU(fd), LSN checkp ...@@ -3664,8 +3664,8 @@ toku_brtheader_begin_checkpoint (CACHEFILE UU(cachefile), int UU(fd), LSN checkp
if (r==0) { if (r==0) {
// hold lock around copying and clearing of dirty bit // hold lock around copying and clearing of dirty bit
toku_brtheader_lock (h); toku_brtheader_lock (h);
assert(h->type == BRTHEADER_CURRENT); lazy_assert(h->type == BRTHEADER_CURRENT);
assert(h->checkpoint_header == NULL); lazy_assert(h->checkpoint_header == NULL);
brtheader_copy_for_checkpoint(h, checkpoint_lsn); brtheader_copy_for_checkpoint(h, checkpoint_lsn);
h->dirty = 0; // this is only place this bit is cleared (in currentheader) h->dirty = 0; // this is only place this bit is cleared (in currentheader)
toku_block_translation_note_start_checkpoint_unlocked(h->blocktable); toku_block_translation_note_start_checkpoint_unlocked(h->blocktable);
...@@ -3694,11 +3694,11 @@ brtheader_note_pin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v) ...@@ -3694,11 +3694,11 @@ brtheader_note_pin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v)
} }
else { else {
//Header exists, so at least one brt must. No live means at least one zombie. //Header exists, so at least one brt must. No live means at least one zombie.
assert(!toku_list_empty(&h->zombie_brts)); lazy_assert(!toku_list_empty(&h->zombie_brts));
brt_to_pin = toku_list_struct(toku_list_head(&h->zombie_brts), struct brt, zombie_brt_link); brt_to_pin = toku_list_struct(toku_list_head(&h->zombie_brts), struct brt, zombie_brt_link);
} }
toku_brtheader_unlock(h); toku_brtheader_unlock(h);
assert(!brt_to_pin->pinned_by_checkpoint); lazy_assert(!brt_to_pin->pinned_by_checkpoint);
brt_to_pin->pinned_by_checkpoint = 1; brt_to_pin->pinned_by_checkpoint = 1;
return 0; return 0;
...@@ -3730,7 +3730,7 @@ brtheader_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v) ...@@ -3730,7 +3730,7 @@ brtheader_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v)
} }
if (!brt_to_unpin) { if (!brt_to_unpin) {
//Header exists, something is pinned, so exactly one zombie must be pinned //Header exists, something is pinned, so exactly one zombie must be pinned
assert(!toku_list_empty(&h->zombie_brts)); lazy_assert(!toku_list_empty(&h->zombie_brts));
struct toku_list *list; struct toku_list *list;
for (list = h->zombie_brts.next; list != &h->zombie_brts; list = list->next) { for (list = h->zombie_brts.next; list != &h->zombie_brts; list = list->next) {
BRT candidate; BRT candidate;
...@@ -3742,14 +3742,14 @@ brtheader_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v) ...@@ -3742,14 +3742,14 @@ brtheader_note_unpin_by_checkpoint (CACHEFILE UU(cachefile), void *header_v)
} }
} }
toku_brtheader_unlock(h); toku_brtheader_unlock(h);
assert(brt_to_unpin); lazy_assert(brt_to_unpin);
assert(brt_to_unpin->pinned_by_checkpoint); lazy_assert(brt_to_unpin->pinned_by_checkpoint);
brt_to_unpin->pinned_by_checkpoint = 0; //Unpin brt_to_unpin->pinned_by_checkpoint = 0; //Unpin
int r = 0; int r = 0;
//Close if necessary //Close if necessary
if (brt_to_unpin->was_closed && !toku_brt_zombie_needed(brt_to_unpin)) { if (brt_to_unpin->was_closed && !toku_brt_zombie_needed(brt_to_unpin)) {
//Close immediately. //Close immediately.
assert(brt_to_unpin->close_db); lazy_assert(brt_to_unpin->close_db);
r = brt_to_unpin->close_db(brt_to_unpin->db, brt_to_unpin->close_flags); r = brt_to_unpin->close_db(brt_to_unpin->db, brt_to_unpin->close_flags);
} }
return r; return r;
...@@ -3767,9 +3767,9 @@ toku_brtheader_checkpoint (CACHEFILE cf, int fd, void *header_v) ...@@ -3767,9 +3767,9 @@ toku_brtheader_checkpoint (CACHEFILE cf, int fd, void *header_v)
if (h->panic!=0) goto handle_error; if (h->panic!=0) goto handle_error;
//printf("%s:%d allocated_limit=%lu writing queue to %lu\n", __FILE__, __LINE__, //printf("%s:%d allocated_limit=%lu writing queue to %lu\n", __FILE__, __LINE__,
// block_allocator_allocated_limit(h->block_allocator), h->unused_blocks.b*h->nodesize); // block_allocator_allocated_limit(h->block_allocator), h->unused_blocks.b*h->nodesize);
assert(ch); lazy_assert(ch);
if (ch->panic!=0) goto handle_error; if (ch->panic!=0) goto handle_error;
assert(ch->type == BRTHEADER_CHECKPOINT_INPROGRESS); lazy_assert(ch->type == BRTHEADER_CHECKPOINT_INPROGRESS);
if (ch->dirty) { // this is only place this bit is tested (in checkpoint_header) if (ch->dirty) { // this is only place this bit is tested (in checkpoint_header)
TOKULOGGER logger = toku_cachefile_logger(cf); TOKULOGGER logger = toku_cachefile_logger(cf);
if (logger) { if (logger) {
...@@ -3808,7 +3808,7 @@ toku_brtheader_end_checkpoint (CACHEFILE cachefile, int fd, void *header_v) { ...@@ -3808,7 +3808,7 @@ toku_brtheader_end_checkpoint (CACHEFILE cachefile, int fd, void *header_v) {
struct brt_header *h = header_v; struct brt_header *h = header_v;
int r = h->panic; int r = h->panic;
if (r==0) { if (r==0) {
assert(h->type == BRTHEADER_CURRENT); lazy_assert(h->type == BRTHEADER_CURRENT);
struct brt_header *ch = h->checkpoint_header; struct brt_header *ch = h->checkpoint_header;
BOOL checkpoint_success_so_far = (BOOL)(ch->checkpoint_count==h->checkpoint_count+1 && ch->dirty==0); BOOL checkpoint_success_so_far = (BOOL)(ch->checkpoint_count==h->checkpoint_count+1 && ch->dirty==0);
if (checkpoint_success_so_far) { if (checkpoint_success_so_far) {
...@@ -3833,16 +3833,16 @@ toku_brtheader_end_checkpoint (CACHEFILE cachefile, int fd, void *header_v) { ...@@ -3833,16 +3833,16 @@ toku_brtheader_end_checkpoint (CACHEFILE cachefile, int fd, void *header_v) {
int int
toku_brtheader_close (CACHEFILE cachefile, int fd, void *header_v, char **malloced_error_string, BOOL oplsn_valid, LSN oplsn) { toku_brtheader_close (CACHEFILE cachefile, int fd, void *header_v, char **malloced_error_string, BOOL oplsn_valid, LSN oplsn) {
struct brt_header *h = header_v; struct brt_header *h = header_v;
assert(h->type == BRTHEADER_CURRENT); lazy_assert(h->type == BRTHEADER_CURRENT);
toku_brtheader_lock(h); toku_brtheader_lock(h);
assert(toku_list_empty(&h->live_brts)); lazy_assert(toku_list_empty(&h->live_brts));
assert(toku_list_empty(&h->zombie_brts)); lazy_assert(toku_list_empty(&h->zombie_brts));
toku_brtheader_unlock(h); toku_brtheader_unlock(h);
int r = 0; int r = 0;
if (h->panic) { if (h->panic) {
r = h->panic; r = h->panic;
} else if (h->dictionary_opened) { //Otherwise header has never fully been created. } else if (h->dictionary_opened) { //Otherwise header has never fully been created.
assert(h->cf == cachefile); lazy_assert(h->cf == cachefile);
TOKULOGGER logger = toku_cachefile_logger(cachefile); TOKULOGGER logger = toku_cachefile_logger(cachefile);
LSN lsn = ZERO_LSN; LSN lsn = ZERO_LSN;
//Get LSN //Get LSN
...@@ -3858,7 +3858,7 @@ toku_brtheader_close (CACHEFILE cachefile, int fd, void *header_v, char **malloc ...@@ -3858,7 +3858,7 @@ toku_brtheader_close (CACHEFILE cachefile, int fd, void *header_v, char **malloc
lsn = ZERO_LSN; // if there is no logger, we use zero for the lsn lsn = ZERO_LSN; // if there is no logger, we use zero for the lsn
if (logger) { if (logger) {
char* fname_in_env = toku_cachefile_fname_in_env(cachefile); char* fname_in_env = toku_cachefile_fname_in_env(cachefile);
assert(fname_in_env); lazy_assert(fname_in_env);
BYTESTRING bs = {.len=strlen(fname_in_env), .data=fname_in_env}; BYTESTRING bs = {.len=strlen(fname_in_env), .data=fname_in_env};
r = toku_log_fclose(logger, &lsn, h->dirty, bs, toku_cachefile_filenum(cachefile)); // flush the log on close (if new header is being written), otherwise it might not make it out. r = toku_log_fclose(logger, &lsn, h->dirty, bs, toku_cachefile_filenum(cachefile)); // flush the log on close (if new header is being written), otherwise it might not make it out.
if (r!=0) return r; if (r!=0) return r;
...@@ -3867,17 +3867,17 @@ toku_brtheader_close (CACHEFILE cachefile, int fd, void *header_v, char **malloc ...@@ -3867,17 +3867,17 @@ toku_brtheader_close (CACHEFILE cachefile, int fd, void *header_v, char **malloc
if (h->dirty) { // this is the only place this bit is tested (in currentheader) if (h->dirty) { // this is the only place this bit is tested (in currentheader)
if (logger) { //Rollback cachefile MUST NOT BE CLOSED DIRTY if (logger) { //Rollback cachefile MUST NOT BE CLOSED DIRTY
//It can be checkpointed only via 'checkpoint' //It can be checkpointed only via 'checkpoint'
assert(logger->rollback_cachefile != cachefile); lazy_assert(logger->rollback_cachefile != cachefile);
} }
int r2; int r2;
//assert(lsn.lsn!=0); //lazy_assert(lsn.lsn!=0);
r2 = toku_brtheader_begin_checkpoint(cachefile, fd, lsn, header_v); r2 = toku_brtheader_begin_checkpoint(cachefile, fd, lsn, header_v);
if (r==0) r = r2; if (r==0) r = r2;
r2 = toku_brtheader_checkpoint(cachefile, fd, h); r2 = toku_brtheader_checkpoint(cachefile, fd, h);
if (r==0) r = r2; if (r==0) r = r2;
r2 = toku_brtheader_end_checkpoint(cachefile, fd, header_v); r2 = toku_brtheader_end_checkpoint(cachefile, fd, header_v);
if (r==0) r = r2; if (r==0) r = r2;
if (!h->panic) assert(!h->dirty); // dirty bit should be cleared by begin_checkpoint and never set again (because we're closing the dictionary) if (!h->panic) lazy_assert(!h->dirty); // dirty bit should be cleared by begin_checkpoint and never set again (because we're closing the dictionary)
} }
} }
if (malloced_error_string) *malloced_error_string = h->panic_string; if (malloced_error_string) *malloced_error_string = h->panic_string;
...@@ -3896,7 +3896,7 @@ toku_brt_db_delay_closed (BRT zombie, DB* db, int (*close_db)(DB*, u_int32_t), u ...@@ -3896,7 +3896,7 @@ toku_brt_db_delay_closed (BRT zombie, DB* db, int (*close_db)(DB*, u_int32_t), u
if (zombie->was_closed) r = EINVAL; if (zombie->was_closed) r = EINVAL;
else if (zombie->db && zombie->db!=db) r = EINVAL; else if (zombie->db && zombie->db!=db) r = EINVAL;
else { else {
assert(zombie->close_db==NULL); lazy_assert(zombie->close_db==NULL);
zombie->close_db = close_db; zombie->close_db = close_db;
zombie->close_flags = close_flags; zombie->close_flags = close_flags;
zombie->was_closed = 1; zombie->was_closed = 1;
...@@ -3933,8 +3933,8 @@ toku_brt_db_delay_closed (BRT zombie, DB* db, int (*close_db)(DB*, u_int32_t), u ...@@ -3933,8 +3933,8 @@ toku_brt_db_delay_closed (BRT zombie, DB* db, int (*close_db)(DB*, u_int32_t), u
// the close and using the lsn provided by logging the close. (Subject to constraint // the close and using the lsn provided by logging the close. (Subject to constraint
// that if a newer lsn is already in the dictionary, don't overwrite the dictionary.) // that if a newer lsn is already in the dictionary, don't overwrite the dictionary.)
int toku_close_brt_lsn (BRT brt, char **error_string, BOOL oplsn_valid, LSN oplsn) { int toku_close_brt_lsn (BRT brt, char **error_string, BOOL oplsn_valid, LSN oplsn) {
assert(!toku_brt_zombie_needed(brt)); lazy_assert(!toku_brt_zombie_needed(brt));
assert(!brt->pinned_by_checkpoint); lazy_assert(!brt->pinned_by_checkpoint);
int r; int r;
while (!toku_list_empty(&brt->cursors)) { while (!toku_list_empty(&brt->cursors)) {
BRT_CURSOR c = toku_list_struct(toku_list_pop(&brt->cursors), struct brt_cursor, cursors_link); BRT_CURSOR c = toku_list_struct(toku_list_pop(&brt->cursors), struct brt_cursor, cursors_link);
...@@ -3944,18 +3944,18 @@ int toku_close_brt_lsn (BRT brt, char **error_string, BOOL oplsn_valid, LSN opls ...@@ -3944,18 +3944,18 @@ int toku_close_brt_lsn (BRT brt, char **error_string, BOOL oplsn_valid, LSN opls
// Must do this work before closing the cf // Must do this work before closing the cf
r=toku_txn_note_close_brt(brt); r=toku_txn_note_close_brt(brt);
assert(r==0); lazy_assert_zero(r);
toku_omt_destroy(&brt->txns); toku_omt_destroy(&brt->txns);
brtheader_note_brt_close(brt); brtheader_note_brt_close(brt);
if (brt->cf) { if (brt->cf) {
if (!brt->h->panic) if (!brt->h->panic)
assert(0==toku_cachefile_count_pinned(brt->cf, 1)); // For the brt, the pinned count should be zero (but if panic, don't worry) lazy_assert(0==toku_cachefile_count_pinned(brt->cf, 1)); // For the brt, the pinned count should be zero (but if panic, don't worry)
//printf("%s:%d closing cachetable\n", __FILE__, __LINE__); //printf("%s:%d closing cachetable\n", __FILE__, __LINE__);
// printf("%s:%d brt=%p ,brt->h=%p\n", __FILE__, __LINE__, brt, brt->h); // printf("%s:%d brt=%p ,brt->h=%p\n", __FILE__, __LINE__, brt, brt->h);
if (error_string) assert(*error_string == 0); if (error_string) lazy_assert(*error_string == 0);
r = toku_cachefile_close(&brt->cf, error_string, oplsn_valid, oplsn); r = toku_cachefile_close(&brt->cf, error_string, oplsn_valid, oplsn);
if (r==0 && error_string) assert(*error_string == 0); if (r==0 && error_string) lazy_assert(*error_string == 0);
} }
if (brt->temp_descriptor.dbt.data) toku_free(brt->temp_descriptor.dbt.data); if (brt->temp_descriptor.dbt.data) toku_free(brt->temp_descriptor.dbt.data);
toku_free(brt); toku_free(brt);
...@@ -3995,7 +3995,7 @@ toku_brt_set_descriptor (BRT t, u_int32_t version, const DBT* descriptor) { ...@@ -3995,7 +3995,7 @@ toku_brt_set_descriptor (BRT t, u_int32_t version, const DBT* descriptor) {
if (!copy) r = ENOMEM; if (!copy) r = ENOMEM;
else { else {
t->temp_descriptor.version = version; t->temp_descriptor.version = version;
assert(!t->temp_descriptor.dbt.data); lazy_assert(!t->temp_descriptor.dbt.data);
toku_fill_dbt(&t->temp_descriptor.dbt, copy, descriptor->size); toku_fill_dbt(&t->temp_descriptor.dbt, copy, descriptor->size);
t->did_set_descriptor = TRUE; t->did_set_descriptor = TRUE;
r = 0; r = 0;
...@@ -4078,14 +4078,14 @@ static inline int brt_cursor_extract_key_and_val( ...@@ -4078,14 +4078,14 @@ static inline int brt_cursor_extract_key_and_val(
static inline void load_dbts_from_omt(BRT_CURSOR c, DBT *key, DBT *val) { static inline void load_dbts_from_omt(BRT_CURSOR c, DBT *key, DBT *val) {
OMTVALUE le = 0; OMTVALUE le = 0;
int r = toku_omt_cursor_current(c->omtcursor, &le); int r = toku_omt_cursor_current(c->omtcursor, &le);
assert(r==0); lazy_assert_zero(r);
r = brt_cursor_extract_key_and_val(le, r = brt_cursor_extract_key_and_val(le,
c, c,
&key->size, &key->size,
&key->data, &key->data,
&val->size, &val->size,
&val->data); &val->data);
assert(r==0); lazy_assert_zero(r);
} }
// When an omt cursor is invalidated, this is the brt-level function // When an omt cursor is invalidated, this is the brt-level function
...@@ -4108,7 +4108,7 @@ brt_cursor_invalidate_callback(OMTCURSOR UU(omt_c), void *extra) { ...@@ -4108,7 +4108,7 @@ brt_cursor_invalidate_callback(OMTCURSOR UU(omt_c), void *extra) {
cursor->val.size = val.size; cursor->val.size = val.size;
//TODO: Find some way to deal with ENOMEM here. //TODO: Find some way to deal with ENOMEM here.
//Until then, just assert that the memdups worked. //Until then, just assert that the memdups worked.
assert(cursor->key.data && cursor->val.data); lazy_assert(cursor->key.data && cursor->val.data);
cursor->current_in_omt = FALSE; cursor->current_in_omt = FALSE;
} }
} }
...@@ -4154,7 +4154,7 @@ int toku_brt_cursor ( ...@@ -4154,7 +4154,7 @@ int toku_brt_cursor (
cursor->ttxn = ttxn; cursor->ttxn = ttxn;
toku_list_push(&brt->cursors, &cursor->cursors_link); toku_list_push(&brt->cursors, &cursor->cursors_link);
int r = toku_omt_cursor_create(&cursor->omtcursor); int r = toku_omt_cursor_create(&cursor->omtcursor);
assert(r==0); lazy_assert_zero(r);
toku_omt_cursor_set_invalidate_callback(cursor->omtcursor, toku_omt_cursor_set_invalidate_callback(cursor->omtcursor,
brt_cursor_invalidate_callback, cursor); brt_cursor_invalidate_callback, cursor);
cursor->root_put_counter=0; cursor->root_put_counter=0;
...@@ -4207,7 +4207,7 @@ static inline BOOL brt_cursor_prefetching(BRT_CURSOR cursor) { ...@@ -4207,7 +4207,7 @@ static inline BOOL brt_cursor_prefetching(BRT_CURSOR cursor) {
//Return TRUE if cursor is uninitialized. FALSE otherwise. //Return TRUE if cursor is uninitialized. FALSE otherwise.
static BOOL static BOOL
brt_cursor_not_set(BRT_CURSOR cursor) { brt_cursor_not_set(BRT_CURSOR cursor) {
assert((cursor->key.data==NULL) == (cursor->val.data==NULL)); lazy_assert((cursor->key.data==NULL) == (cursor->val.data==NULL));
return (BOOL)(!cursor->current_in_omt && cursor->key.data == NULL); return (BOOL)(!cursor->current_in_omt && cursor->key.data == NULL);
} }
...@@ -4333,10 +4333,10 @@ brt_search_leaf_node(BRTNODE node, brt_search_t *search, BRT_GET_CALLBACK_FUNCTI ...@@ -4333,10 +4333,10 @@ brt_search_leaf_node(BRTNODE node, brt_search_t *search, BRT_GET_CALLBACK_FUNCTI
idx--; idx--;
break; break;
default: default:
assert(FALSE); lazy_assert(FALSE);
} }
r = toku_omt_fetch(node->u.l.buffer, idx, &datav, NULL); r = toku_omt_fetch(node->u.l.buffer, idx, &datav, NULL);
assert(r==0); // we just validated the index lazy_assert_zero(r); // we just validated the index
le = datav; le = datav;
if (!is_le_val_empty(le,brtcursor)) goto got_a_good_value; if (!is_le_val_empty(le,brtcursor)) goto got_a_good_value;
} }
...@@ -4358,7 +4358,7 @@ got_a_good_value: ...@@ -4358,7 +4358,7 @@ got_a_good_value:
&vallen, &vallen,
&val); &val);
assert(brtcursor->current_in_omt == FALSE); lazy_assert(brtcursor->current_in_omt == FALSE);
if (r==0) { if (r==0) {
r = getf(keylen, key, vallen, val, getf_v); r = getf(keylen, key, vallen, val, getf_v);
} }
...@@ -4430,7 +4430,7 @@ brt_search_child(BRT brt, BRTNODE node, int childnum, brt_search_t *search, BRT_ ...@@ -4430,7 +4430,7 @@ brt_search_child(BRT brt, BRTNODE node, int childnum, brt_search_t *search, BRT_
BOOL did_io = FALSE; BOOL did_io = FALSE;
enum reactivity child_re = RE_STABLE; enum reactivity child_re = RE_STABLE;
int rr = flush_this_child(brt, node, childnum, &child_re, &did_io); int rr = flush_this_child(brt, node, childnum, &child_re, &did_io);
assert(rr == 0); lazy_assert_zero(rr);
/* push down may cause the child to be overfull, but that's OK. We'll search the child anyway, and recompute the ractivity. */ /* push down may cause the child to be overfull, but that's OK. We'll search the child anyway, and recompute the ractivity. */
} }
...@@ -4439,7 +4439,7 @@ brt_search_child(BRT brt, BRTNODE node, int childnum, brt_search_t *search, BRT_ ...@@ -4439,7 +4439,7 @@ brt_search_child(BRT brt, BRTNODE node, int childnum, brt_search_t *search, BRT_
u_int32_t fullhash = compute_child_fullhash(brt->cf, node, childnum); u_int32_t fullhash = compute_child_fullhash(brt->cf, node, childnum);
{ {
int rr = toku_cachetable_get_and_pin(brt->cf, childblocknum, fullhash, &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); int rr = toku_cachetable_get_and_pin(brt->cf, childblocknum, fullhash, &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr == 0); lazy_assert_zero(rr);
} }
BRTNODE childnode = node_v; BRTNODE childnode = node_v;
...@@ -4502,7 +4502,7 @@ brt_search_nonleaf_node(BRT brt, BRTNODE node, brt_search_t *search, BRT_GET_CAL ...@@ -4502,7 +4502,7 @@ brt_search_nonleaf_node(BRT brt, BRTNODE node, brt_search_t *search, BRT_GET_CAL
BOOL did_change_shape = FALSE; BOOL did_change_shape = FALSE;
verify_local_fingerprint_nonleaf(node); verify_local_fingerprint_nonleaf(node);
int r = brt_search_child(brt, node, child[c], search, getf, getf_v, re, doprefetch, brtcursor, &did_change_shape); int r = brt_search_child(brt, node, child[c], search, getf, getf_v, re, doprefetch, brtcursor, &did_change_shape);
assert(r != EAGAIN); lazy_assert(r != EAGAIN);
if (r == 0) return r; //Success if (r == 0) return r; //Success
if (r != DB_NOTFOUND) return r; //Error (or message to quit early, such as TOKUDB_FOUND_BUT_REJECTED) if (r != DB_NOTFOUND) return r; //Error (or message to quit early, such as TOKUDB_FOUND_BUT_REJECTED)
if (did_change_shape) goto again; if (did_change_shape) goto again;
...@@ -4534,7 +4534,7 @@ toku_brt_search (BRT brt, brt_search_t *search, BRT_GET_CALLBACK_FUNCTION getf, ...@@ -4534,7 +4534,7 @@ toku_brt_search (BRT brt, brt_search_t *search, BRT_GET_CALLBACK_FUNCTION getf,
{ {
int r, rr; int r, rr;
assert(brt->h); lazy_assert(brt->h);
*root_put_counter = brt->h->root_put_counter; *root_put_counter = brt->h->root_put_counter;
...@@ -4543,10 +4543,10 @@ toku_brt_search (BRT brt, brt_search_t *search, BRT_GET_CALLBACK_FUNCTION getf, ...@@ -4543,10 +4543,10 @@ toku_brt_search (BRT brt, brt_search_t *search, BRT_GET_CALLBACK_FUNCTION getf,
void *node_v; void *node_v;
//assert(fullhash == toku_cachetable_hash(brt->cf, *rootp)); //lazy_assert(fullhash == toku_cachetable_hash(brt->cf, *rootp));
rr = toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash, rr = toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash,
&node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr == 0); lazy_assert_zero(rr);
BRTNODE node = node_v; BRTNODE node = node_v;
...@@ -4562,7 +4562,7 @@ toku_brt_search (BRT brt, brt_search_t *search, BRT_GET_CALLBACK_FUNCTION getf, ...@@ -4562,7 +4562,7 @@ toku_brt_search (BRT brt, brt_search_t *search, BRT_GET_CALLBACK_FUNCTION getf,
return_r: return_r:
rr = toku_unpin_brtnode(brt, node); rr = toku_unpin_brtnode(brt, node);
assert(rr == 0); lazy_assert_zero(rr);
//Heaviside function (+direction) queries define only a lower or upper //Heaviside function (+direction) queries define only a lower or upper
//bound. Some queries require both an upper and lower bound. //bound. Some queries require both an upper and lower bound.
...@@ -4713,14 +4713,14 @@ brt_cursor_shortcut (BRT_CURSOR cursor, int direction, u_int32_t limit, BRT_GET_ ...@@ -4713,14 +4713,14 @@ brt_cursor_shortcut (BRT_CURSOR cursor, int direction, u_int32_t limit, BRT_GET_
if (c_put_counter==h_put_counter && toku_omt_cursor_is_valid(cursor->omtcursor)) { if (c_put_counter==h_put_counter && toku_omt_cursor_is_valid(cursor->omtcursor)) {
u_int32_t index = 0; u_int32_t index = 0;
r = toku_omt_cursor_current_index(omtcursor, &index); r = toku_omt_cursor_current_index(omtcursor, &index);
assert(r==0); lazy_assert_zero(r);
//Starting with the prev, find the first real (non-provdel) leafentry. //Starting with the prev, find the first real (non-provdel) leafentry.
while (index != limit) { while (index != limit) {
OMTVALUE le = NULL; OMTVALUE le = NULL;
index += direction; index += direction;
r = toku_omt_fetch(omt, index, &le, NULL); r = toku_omt_fetch(omt, index, &le, NULL);
assert(r==0); lazy_assert_zero(r);
if (toku_brt_cursor_is_leaf_mode(cursor) || !is_le_val_empty(le, cursor)) { if (toku_brt_cursor_is_leaf_mode(cursor) || !is_le_val_empty(le, cursor)) {
maybe_do_implicit_promotion_on_query(cursor, le); maybe_do_implicit_promotion_on_query(cursor, le);
...@@ -4764,11 +4764,11 @@ brt_cursor_maybe_get_and_pin_leaf(BRT_CURSOR brtcursor, BRTNODE* leafp) { ...@@ -4764,11 +4764,11 @@ brt_cursor_maybe_get_and_pin_leaf(BRT_CURSOR brtcursor, BRTNODE* leafp) {
brtcursor->leaf_info.blocknumber, brtcursor->leaf_info.blocknumber,
brtcursor->leaf_info.fullhash, brtcursor->leaf_info.fullhash,
&leafv); &leafv);
assert(r==0); lazy_assert_zero(r);
if (r == 0) { if (r == 0) {
brtcursor->leaf_info.node = leafv; brtcursor->leaf_info.node = leafv;
assert(brtcursor->leaf_info.node->height == 0); // verify that returned node is leaf... lazy_assert(brtcursor->leaf_info.node->height == 0); // verify that returned node is leaf...
assert(brtcursor->leaf_info.node->u.l.buffer == toku_omt_cursor_get_omt(brtcursor->omtcursor)); // ... and has right omt lazy_assert(brtcursor->leaf_info.node->u.l.buffer == toku_omt_cursor_get_omt(brtcursor->omtcursor)); // ... and has right omt
*leafp = brtcursor->leaf_info.node; *leafp = brtcursor->leaf_info.node;
} }
return r; return r;
...@@ -5015,7 +5015,7 @@ toku_brt_lookup (BRT brt, DBT *k, BRT_GET_CALLBACK_FUNCTION getf, void *getf_v) ...@@ -5015,7 +5015,7 @@ toku_brt_lookup (BRT brt, DBT *k, BRT_GET_CALLBACK_FUNCTION getf, void *getf_v)
int op = DB_SET; int op = DB_SET;
r = toku_brt_cursor_get(cursor, k, getf, getf_v, op); r = toku_brt_cursor_get(cursor, k, getf, getf_v, op);
rr = toku_brt_cursor_close(cursor); assert(rr == 0); rr = toku_brt_cursor_close(cursor); lazy_assert_zero(rr);
return r; return r;
} }
...@@ -5060,12 +5060,12 @@ static void toku_brt_keyrange_internal (BRT brt, CACHEKEY nodename, u_int32_t fu ...@@ -5060,12 +5060,12 @@ static void toku_brt_keyrange_internal (BRT brt, CACHEKEY nodename, u_int32_t fu
BRTNODE node; BRTNODE node;
{ {
void *node_v; void *node_v;
//assert(fullhash == toku_cachetable_hash(brt->cf, nodename)); //lazy_assert(fullhash == toku_cachetable_hash(brt->cf, nodename));
int rr = toku_cachetable_get_and_pin(brt->cf, nodename, fullhash, int rr = toku_cachetable_get_and_pin(brt->cf, nodename, fullhash,
&node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr == 0); lazy_assert_zero(rr);
node = node_v; node = node_v;
assert(node->fullhash==fullhash); lazy_assert(node->fullhash==fullhash);
} }
if (node->height>0) { if (node->height>0) {
int n_keys = node->u.n.n_children-1; int n_keys = node->u.n.n_children-1;
...@@ -5109,12 +5109,12 @@ static void toku_brt_keyrange_internal (BRT brt, CACHEKEY nodename, u_int32_t fu ...@@ -5109,12 +5109,12 @@ static void toku_brt_keyrange_internal (BRT brt, CACHEKEY nodename, u_int32_t fu
} }
{ {
int rr = toku_unpin_brtnode(brt, node); int rr = toku_unpin_brtnode(brt, node);
assert(rr == 0); lazy_assert_zero(rr);
} }
} }
int toku_brt_keyrange (BRT brt, DBT *key, u_int64_t *less, u_int64_t *equal, u_int64_t *greater) { int toku_brt_keyrange (BRT brt, DBT *key, u_int64_t *less, u_int64_t *equal, u_int64_t *greater) {
assert(brt->h); lazy_assert(brt->h);
u_int32_t fullhash; u_int32_t fullhash;
CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash); CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash);
...@@ -5129,11 +5129,11 @@ int toku_brt_stat64 (BRT brt, TOKUTXN UU(txn), struct brtstat64_s *s) { ...@@ -5129,11 +5129,11 @@ int toku_brt_stat64 (BRT brt, TOKUTXN UU(txn), struct brtstat64_s *s) {
int fd = toku_cachefile_get_and_pin_fd(brt->cf); int fd = toku_cachefile_get_and_pin_fd(brt->cf);
int r = toku_os_get_file_size(fd, &file_size); int r = toku_os_get_file_size(fd, &file_size);
toku_cachefile_unpin_fd(brt->cf); toku_cachefile_unpin_fd(brt->cf);
assert(r==0); lazy_assert_zero(r);
s->fsize = file_size + toku_cachefile_size_in_memory(brt->cf); s->fsize = file_size + toku_cachefile_size_in_memory(brt->cf);
} }
assert(brt->h); lazy_assert(brt->h);
u_int32_t fullhash; u_int32_t fullhash;
CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash); CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash);
CACHEKEY root = *rootp; CACHEKEY root = *rootp;
...@@ -5174,10 +5174,10 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo ...@@ -5174,10 +5174,10 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo
int r = toku_cachetable_get_and_pin(brt->cf, blocknum, fullhash, int r = toku_cachetable_get_and_pin(brt->cf, blocknum, fullhash,
&node_v, NULL, &node_v, NULL,
toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(r==0); lazy_assert_zero(r);
fprintf(file, "%s:%d pin %p\n", __FILE__, __LINE__, node_v); fprintf(file, "%s:%d pin %p\n", __FILE__, __LINE__, node_v);
node=node_v; node=node_v;
assert(node->fullhash==fullhash); lazy_assert(node->fullhash==fullhash);
result=toku_verify_brtnode(brt, blocknum, lorange, lolen, hirange, hilen, 0); result=toku_verify_brtnode(brt, blocknum, lorange, lolen, hirange, hilen, 0);
fprintf(file, "%*sNode=%p\n", depth, "", node); fprintf(file, "%*sNode=%p\n", depth, "", node);
if (node->height>0) { if (node->height>0) {
...@@ -5203,8 +5203,8 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo ...@@ -5203,8 +5203,8 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo
{ {
data=data; datalen=datalen; keylen=keylen; data=data; datalen=datalen; keylen=keylen;
fprintf(file, "%*s xid=%"PRIu64" %u (type=%d)\n", depth+2, "", xids_get_innermost_xid(xids), (unsigned)toku_dtoh32(*(int*)key), type); fprintf(file, "%*s xid=%"PRIu64" %u (type=%d)\n", depth+2, "", xids_get_innermost_xid(xids), (unsigned)toku_dtoh32(*(int*)key), type);
//assert(strlen((char*)key)+1==keylen); //lazy_assert(strlen((char*)key)+1==keylen);
//assert(strlen((char*)data)+1==datalen); //lazy_assert(strlen((char*)data)+1==datalen);
}); });
} }
for (i=0; i<node->u.n.n_children; i++) { for (i=0; i<node->u.n.n_children; i++) {
...@@ -5235,7 +5235,7 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo ...@@ -5235,7 +5235,7 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo
for (i=0; i<size; i++) { for (i=0; i<size; i++) {
OMTVALUE v = 0; OMTVALUE v = 0;
r = toku_omt_fetch(node->u.l.buffer, i, &v, 0); r = toku_omt_fetch(node->u.l.buffer, i, &v, 0);
assert(r==0); lazy_assert_zero(r);
fprintf(file, " [%d]=", i); fprintf(file, " [%d]=", i);
print_leafentry(file, v); print_leafentry(file, v);
fprintf(file, "\n"); fprintf(file, "\n");
...@@ -5244,13 +5244,13 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo ...@@ -5244,13 +5244,13 @@ toku_dump_brtnode (FILE *file, BRT brt, BLOCKNUM blocknum, int depth, bytevec lo
fprintf(file, "\n"); fprintf(file, "\n");
} }
r = toku_cachetable_unpin(brt->cf, blocknum, fullhash, CACHETABLE_CLEAN, 0); r = toku_cachetable_unpin(brt->cf, blocknum, fullhash, CACHETABLE_CLEAN, 0);
assert(r==0); lazy_assert_zero(r);
return result; return result;
} }
int toku_dump_brt (FILE *f, BRT brt) { int toku_dump_brt (FILE *f, BRT brt) {
CACHEKEY *rootp; CACHEKEY *rootp;
assert(brt->h); lazy_assert(brt->h);
u_int32_t fullhash; u_int32_t fullhash;
toku_dump_translation_table(f, brt->h->blocktable); toku_dump_translation_table(f, brt->h->blocktable);
rootp = toku_calculate_root_offset_pointer(brt, &fullhash); rootp = toku_calculate_root_offset_pointer(brt, &fullhash);
...@@ -5333,8 +5333,8 @@ int toku_brt_destroy(void) { ...@@ -5333,8 +5333,8 @@ int toku_brt_destroy(void) {
//Suppress both rollback and recovery logs. //Suppress both rollback and recovery logs.
void void
toku_brt_suppress_recovery_logs (BRT brt, TOKUTXN txn) { toku_brt_suppress_recovery_logs (BRT brt, TOKUTXN txn) {
assert(brt->h->txnid_that_created_or_locked_when_empty == toku_txn_get_txnid(txn)); lazy_assert(brt->h->txnid_that_created_or_locked_when_empty == toku_txn_get_txnid(txn));
assert(brt->h->txnid_that_suppressed_recovery_logs == TXNID_NONE); lazy_assert(brt->h->txnid_that_suppressed_recovery_logs == TXNID_NONE);
brt->h->txnid_that_suppressed_recovery_logs = toku_txn_get_txnid(txn); brt->h->txnid_that_suppressed_recovery_logs = toku_txn_get_txnid(txn);
toku_list_push(&txn->checkpoint_before_commit, &brt->h->checkpoint_before_commit_link); toku_list_push(&txn->checkpoint_before_commit, &brt->h->checkpoint_before_commit_link);
} }
...@@ -5375,7 +5375,7 @@ int toku_logger_log_fdelete (TOKUTXN txn, const char *fname, FILENUM filenum, u_ ...@@ -5375,7 +5375,7 @@ int toku_logger_log_fdelete (TOKUTXN txn, const char *fname, FILENUM filenum, u_
// - make entry in rollback log // - make entry in rollback log
// - make fdelete entry in recovery log // - make fdelete entry in recovery log
int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) { int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) {
assert(txn); lazy_assert(txn);
int r; int r;
const char *iname_in_env = iname_in_env_dbt_p->data; const char *iname_in_env = iname_in_env_dbt_p->data;
CACHEFILE cf = NULL; CACHEFILE cf = NULL;
...@@ -5395,7 +5395,7 @@ int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) { ...@@ -5395,7 +5395,7 @@ int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) {
} }
else { else {
//Header exists, so at least one brt must. No live means at least one zombie. //Header exists, so at least one brt must. No live means at least one zombie.
assert(!toku_list_empty(&h->zombie_brts)); lazy_assert(!toku_list_empty(&h->zombie_brts));
brt = toku_list_struct(toku_list_head(&h->zombie_brts), struct brt, zombie_brt_link); brt = toku_list_struct(toku_list_head(&h->zombie_brts), struct brt, zombie_brt_link);
} }
toku_brtheader_unlock(h); toku_brtheader_unlock(h);
...@@ -5403,7 +5403,7 @@ int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) { ...@@ -5403,7 +5403,7 @@ int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) {
if (r!=0) return r; if (r!=0) return r;
} }
else else
assert(r==ENOENT); lazy_assert(r==ENOENT);
toku_txn_force_fsync_on_commit(txn); //If the txn commits, the commit MUST be in the log toku_txn_force_fsync_on_commit(txn); //If the txn commits, the commit MUST be in the log
//before the file is actually unlinked //before the file is actually unlinked
...@@ -5411,7 +5411,7 @@ int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) { ...@@ -5411,7 +5411,7 @@ int toku_brt_remove_on_commit(TOKUTXN txn, DBT* iname_in_env_dbt_p) {
BYTESTRING iname_in_env_bs = { .len=strlen(iname_in_env), .data = (char*)iname_in_env }; BYTESTRING iname_in_env_bs = { .len=strlen(iname_in_env), .data = (char*)iname_in_env };
// make entry in rollback log // make entry in rollback log
r = toku_logger_save_rollback_fdelete(txn, was_open, filenum, &iname_in_env_bs); r = toku_logger_save_rollback_fdelete(txn, was_open, filenum, &iname_in_env_bs);
assert(r==0); //On error we would need to remove the CF reference, which is complicated. lazy_assert_zero(r); //On error we would need to remove the CF reference, which is complicated.
} }
if (r==0) if (r==0)
// make entry in recovery log // make entry in recovery log
...@@ -5428,14 +5428,14 @@ int toku_brt_remove_now(CACHETABLE ct, DBT* iname_in_env_dbt_p) { ...@@ -5428,14 +5428,14 @@ int toku_brt_remove_now(CACHETABLE ct, DBT* iname_in_env_dbt_p) {
r = toku_cachefile_of_iname_in_env(ct, iname_in_env, &cf); r = toku_cachefile_of_iname_in_env(ct, iname_in_env, &cf);
if (r == 0) { if (r == 0) {
r = toku_cachefile_redirect_nullfd(cf); r = toku_cachefile_redirect_nullfd(cf);
assert(r==0); lazy_assert_zero(r);
} }
else else
assert(r==ENOENT); lazy_assert(r==ENOENT);
char *iname_in_cwd = toku_cachetable_get_fname_in_cwd(ct, iname_in_env_dbt_p->data); char *iname_in_cwd = toku_cachetable_get_fname_in_cwd(ct, iname_in_env_dbt_p->data);
r = unlink(iname_in_cwd); // we need a pathname relative to cwd r = unlink(iname_in_cwd); // we need a pathname relative to cwd
assert(r==0); lazy_assert_zero(r);
toku_free(iname_in_cwd); toku_free(iname_in_cwd);
return r; return r;
} }
...@@ -5472,7 +5472,7 @@ walk_tree_nonleaf (BRT brt, BRTNODE node, BOOL f(BRT brt, BRTNODE node, void *v) ...@@ -5472,7 +5472,7 @@ walk_tree_nonleaf (BRT brt, BRTNODE node, BOOL f(BRT brt, BRTNODE node, void *v)
BOOL did_io = FALSE; BOOL did_io = FALSE;
enum reactivity child_re = RE_STABLE; enum reactivity child_re = RE_STABLE;
int rr = flush_this_child(brt, node, childnum, &child_re, &did_io); int rr = flush_this_child(brt, node, childnum, &child_re, &did_io);
assert(rr == 0); lazy_assert_zero(rr);
} }
BRTNODE childnode; BRTNODE childnode;
{ {
...@@ -5480,7 +5480,7 @@ walk_tree_nonleaf (BRT brt, BRTNODE node, BOOL f(BRT brt, BRTNODE node, void *v) ...@@ -5480,7 +5480,7 @@ walk_tree_nonleaf (BRT brt, BRTNODE node, BOOL f(BRT brt, BRTNODE node, void *v)
BLOCKNUM childblocknum = BNC_BLOCKNUM(node,childnum); BLOCKNUM childblocknum = BNC_BLOCKNUM(node,childnum);
u_int32_t fullhash = compute_child_fullhash(brt->cf, node, childnum); u_int32_t fullhash = compute_child_fullhash(brt->cf, node, childnum);
int rr = toku_cachetable_get_and_pin(brt->cf, childblocknum, fullhash, &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); int rr = toku_cachetable_get_and_pin(brt->cf, childblocknum, fullhash, &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr ==0); lazy_assert(rr ==0);
childnode = node_v; childnode = node_v;
} }
enum reactivity child_re = RE_STABLE; enum reactivity child_re = RE_STABLE;
...@@ -5493,7 +5493,7 @@ walk_tree_nonleaf (BRT brt, BRTNODE node, BOOL f(BRT brt, BRTNODE node, void *v) ...@@ -5493,7 +5493,7 @@ walk_tree_nonleaf (BRT brt, BRTNODE node, BOOL f(BRT brt, BRTNODE node, void *v)
} }
{ {
int rr = toku_unpin_brtnode(brt, childnode); int rr = toku_unpin_brtnode(brt, childnode);
assert(rr==0); lazy_assert_zero(rr);
} }
if (r!=0 || *exit_now || *try_again) break; // if we changed the shape of the tree then we're going to have to try again if (r!=0 || *exit_now || *try_again) break; // if we changed the shape of the tree then we're going to have to try again
} }
...@@ -5537,12 +5537,12 @@ walk_tree (BRT brt, BOOL f(BRT brt, BRTNODE node, void *v), void *v, BOOL modifi ...@@ -5537,12 +5537,12 @@ walk_tree (BRT brt, BOOL f(BRT brt, BRTNODE node, void *v), void *v, BOOL modifi
CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash); CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash);
BRTNODE node; BRTNODE node;
//assert(fullhash == toku_cachetable_hash(brt->cf, *rootp)); //lazy_assert(fullhash == toku_cachetable_hash(brt->cf, *rootp));
{ {
void *node_v; void *node_v;
int rr = toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash, int rr = toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash,
&node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr==0); lazy_assert_zero(rr);
node = node_v; node = node_v;
} }
enum reactivity re = RE_STABLE; enum reactivity re = RE_STABLE;
...@@ -5553,7 +5553,7 @@ walk_tree (BRT brt, BOOL f(BRT brt, BRTNODE node, void *v), void *v, BOOL modifi ...@@ -5553,7 +5553,7 @@ walk_tree (BRT brt, BOOL f(BRT brt, BRTNODE node, void *v), void *v, BOOL modifi
return_r: return_r:
{ {
int rr = toku_unpin_brtnode(brt, node); int rr = toku_unpin_brtnode(brt, node);
assert(rr==0); lazy_assert_zero(rr);
} }
return r; return r;
} }
...@@ -5604,7 +5604,7 @@ toku_brt_is_empty (BRT brt, /*out*/BOOL *try_again) { ...@@ -5604,7 +5604,7 @@ toku_brt_is_empty (BRT brt, /*out*/BOOL *try_again) {
struct is_empty_struct_s is_empty_struct = { TRUE, &brtcmd }; struct is_empty_struct_s is_empty_struct = { TRUE, &brtcmd };
int r = walk_tree(brt, check_if_node_is_empty, &is_empty_struct, TRUE, try_again); int r = walk_tree(brt, check_if_node_is_empty, &is_empty_struct, TRUE, try_again);
assert(r==0); lazy_assert_zero(r);
xids_destroy(&message_xids); xids_destroy(&message_xids);
...@@ -5621,13 +5621,13 @@ static BOOL is_empty_fast_iter (BRT brt, BRTNODE node) { ...@@ -5621,13 +5621,13 @@ static BOOL is_empty_fast_iter (BRT brt, BRTNODE node) {
BLOCKNUM childblocknum = BNC_BLOCKNUM(node,childnum); BLOCKNUM childblocknum = BNC_BLOCKNUM(node,childnum);
u_int32_t fullhash = compute_child_fullhash(brt->cf, node, childnum); u_int32_t fullhash = compute_child_fullhash(brt->cf, node, childnum);
int rr = toku_cachetable_get_and_pin(brt->cf, childblocknum, fullhash, &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); int rr = toku_cachetable_get_and_pin(brt->cf, childblocknum, fullhash, &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr ==0); lazy_assert(rr ==0);
childnode = node_v; childnode = node_v;
} }
int child_is_empty = is_empty_fast_iter(brt, childnode); int child_is_empty = is_empty_fast_iter(brt, childnode);
{ {
int rr = toku_unpin_brtnode(brt, childnode); int rr = toku_unpin_brtnode(brt, childnode);
assert(rr==0); lazy_assert_zero(rr);
} }
if (!child_is_empty) return 0; if (!child_is_empty) return 0;
} }
...@@ -5645,18 +5645,18 @@ BOOL toku_brt_is_empty_fast (BRT brt) ...@@ -5645,18 +5645,18 @@ BOOL toku_brt_is_empty_fast (BRT brt)
u_int32_t fullhash; u_int32_t fullhash;
CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash); CACHEKEY *rootp = toku_calculate_root_offset_pointer(brt, &fullhash);
BRTNODE node; BRTNODE node;
//assert(fullhash == toku_cachetable_hash(brt->cf, *rootp)); //lazy_assert(fullhash == toku_cachetable_hash(brt->cf, *rootp));
{ {
void *node_v; void *node_v;
int rr = toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash, int rr = toku_cachetable_get_and_pin(brt->cf, *rootp, fullhash,
&node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h); &node_v, NULL, toku_brtnode_flush_callback, toku_brtnode_fetch_callback, brt->h);
assert(rr==0); lazy_assert_zero(rr);
node = node_v; node = node_v;
} }
BOOL r = is_empty_fast_iter(brt, node); BOOL r = is_empty_fast_iter(brt, node);
{ {
int rr = toku_unpin_brtnode(brt, node); int rr = toku_unpin_brtnode(brt, node);
assert(rr==0); lazy_assert_zero(rr);
} }
return r; return r;
} }
......
...@@ -108,19 +108,19 @@ int brt_loader_lock_init(BRTLOADER bl) { ...@@ -108,19 +108,19 @@ int brt_loader_lock_init(BRTLOADER bl) {
void brt_loader_lock_destroy(BRTLOADER bl) { void brt_loader_lock_destroy(BRTLOADER bl) {
if (bl->mutex_init) { if (bl->mutex_init) {
int r = toku_pthread_mutex_destroy(&bl->mutex); resource_assert(r == 0); int r = toku_pthread_mutex_destroy(&bl->mutex); resource_assert_zero(r);
bl->mutex_init = FALSE; bl->mutex_init = FALSE;
} }
} }
static void brt_loader_lock(BRTLOADER bl) { static void brt_loader_lock(BRTLOADER bl) {
invariant(bl->mutex_init); invariant(bl->mutex_init);
int r = toku_pthread_mutex_lock(&bl->mutex); resource_assert(r == 0); int r = toku_pthread_mutex_lock(&bl->mutex); resource_assert_zero(r);
} }
static void brt_loader_unlock(BRTLOADER bl) { static void brt_loader_unlock(BRTLOADER bl) {
invariant(bl->mutex_init); invariant(bl->mutex_init);
int r = toku_pthread_mutex_unlock(&bl->mutex); resource_assert(r == 0); int r = toku_pthread_mutex_unlock(&bl->mutex); resource_assert_zero(r);
} }
static int add_big_buffer(struct file_info *file) { static int add_big_buffer(struct file_info *file) {
...@@ -155,7 +155,7 @@ static void cleanup_big_buffer(struct file_info *file) { ...@@ -155,7 +155,7 @@ static void cleanup_big_buffer(struct file_info *file) {
int brtloader_init_file_infos (struct file_infos *fi) { int brtloader_init_file_infos (struct file_infos *fi) {
int result = 0; int result = 0;
int r = toku_pthread_mutex_init(&fi->lock, NULL); resource_assert(r == 0); int r = toku_pthread_mutex_init(&fi->lock, NULL); resource_assert_zero(r);
fi->n_files = 0; fi->n_files = 0;
fi->n_files_limit = 1; fi->n_files_limit = 1;
fi->n_files_open = 0; fi->n_files_open = 0;
...@@ -172,7 +172,7 @@ void brtloader_fi_destroy (struct file_infos *fi, BOOL is_error) ...@@ -172,7 +172,7 @@ void brtloader_fi_destroy (struct file_infos *fi, BOOL is_error)
// If !is_error then requires that all the temp files have been closed and destroyed // If !is_error then requires that all the temp files have been closed and destroyed
// No error codes are returned. If anything goes wrong with closing and unlinking then it's only in an is_error case, so we don't care. // No error codes are returned. If anything goes wrong with closing and unlinking then it's only in an is_error case, so we don't care.
{ {
int r = toku_pthread_mutex_destroy(&fi->lock); resource_assert(r == 0); int r = toku_pthread_mutex_destroy(&fi->lock); resource_assert_zero(r);
if (!is_error) { if (!is_error) {
invariant(fi->n_files_open==0); invariant(fi->n_files_open==0);
invariant(fi->n_files_extant==0); invariant(fi->n_files_extant==0);
...@@ -201,7 +201,7 @@ static int open_file_add (struct file_infos *fi, ...@@ -201,7 +201,7 @@ static int open_file_add (struct file_infos *fi,
/* out */ FIDX *idx) /* out */ FIDX *idx)
{ {
int result = 0; int result = 0;
int r = toku_pthread_mutex_lock(&fi->lock); resource_assert(r==0); int r = toku_pthread_mutex_lock(&fi->lock); resource_assert_zero(r);
if (fi->n_files >= fi->n_files_limit) { if (fi->n_files >= fi->n_files_limit) {
fi->n_files_limit *=2; fi->n_files_limit *=2;
XREALLOC_N(fi->n_files_limit, fi->file_infos); XREALLOC_N(fi->n_files_limit, fi->file_infos);
...@@ -221,13 +221,13 @@ static int open_file_add (struct file_infos *fi, ...@@ -221,13 +221,13 @@ static int open_file_add (struct file_infos *fi,
fi->n_files_extant++; fi->n_files_extant++;
fi->n_files_open++; fi->n_files_open++;
} }
r = toku_pthread_mutex_unlock(&fi->lock); resource_assert(r==0); r = toku_pthread_mutex_unlock(&fi->lock); resource_assert_zero(r);
return result; return result;
} }
int brtloader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) { int brtloader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) {
int result = 0; int result = 0;
int r = toku_pthread_mutex_lock(&fi->lock); resource_assert(r==0); int r = toku_pthread_mutex_lock(&fi->lock); resource_assert_zero(r);
int i = idx.idx; int i = idx.idx;
invariant(i>=0 && i<fi->n_files); invariant(i>=0 && i<fi->n_files);
invariant(!fi->file_infos[i].is_open); invariant(!fi->file_infos[i].is_open);
...@@ -241,14 +241,14 @@ int brtloader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) { ...@@ -241,14 +241,14 @@ int brtloader_fi_reopen (struct file_infos *fi, FIDX idx, const char *mode) {
//add_big_buffer(&fi->file_infos[i]); //add_big_buffer(&fi->file_infos[i]);
fi->n_files_open++; fi->n_files_open++;
} }
r = toku_pthread_mutex_unlock(&fi->lock); resource_assert(r==0); r = toku_pthread_mutex_unlock(&fi->lock); resource_assert_zero(r);
return result; return result;
} }
int brtloader_fi_close (struct file_infos *fi, FIDX idx) int brtloader_fi_close (struct file_infos *fi, FIDX idx)
{ {
int result = 0; int result = 0;
{ int r2 = toku_pthread_mutex_lock(&fi->lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_lock(&fi->lock); resource_assert_zero(r2); }
invariant(idx.idx >=0 && idx.idx < fi->n_files); invariant(idx.idx >=0 && idx.idx < fi->n_files);
if (fi->file_infos[idx.idx].is_open) { if (fi->file_infos[idx.idx].is_open) {
invariant(fi->n_files_open>0); // loader-cleanup-test failure invariant(fi->n_files_open>0); // loader-cleanup-test failure
...@@ -261,13 +261,13 @@ int brtloader_fi_close (struct file_infos *fi, FIDX idx) ...@@ -261,13 +261,13 @@ int brtloader_fi_close (struct file_infos *fi, FIDX idx)
result = errno; result = errno;
} else } else
result = EINVAL; result = EINVAL;
{ int r2 = toku_pthread_mutex_unlock(&fi->lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_unlock(&fi->lock); resource_assert_zero(r2); }
return result; return result;
} }
int brtloader_fi_unlink (struct file_infos *fi, FIDX idx) { int brtloader_fi_unlink (struct file_infos *fi, FIDX idx) {
int result = 0; int result = 0;
{ int r2 = toku_pthread_mutex_lock(&fi->lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_lock(&fi->lock); resource_assert_zero(r2); }
int id = idx.idx; int id = idx.idx;
invariant(id >=0 && id < fi->n_files); invariant(id >=0 && id < fi->n_files);
if (fi->file_infos[id].is_extant) { // must still exist if (fi->file_infos[id].is_extant) { // must still exist
...@@ -282,7 +282,7 @@ int brtloader_fi_unlink (struct file_infos *fi, FIDX idx) { ...@@ -282,7 +282,7 @@ int brtloader_fi_unlink (struct file_infos *fi, FIDX idx) {
fi->file_infos[id].fname = NULL; fi->file_infos[id].fname = NULL;
} else } else
result = EINVAL; result = EINVAL;
{ int r2 = toku_pthread_mutex_unlock(&fi->lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_unlock(&fi->lock); resource_assert_zero(r2); }
return result; return result;
} }
...@@ -639,11 +639,11 @@ static void brt_loader_set_panic(BRTLOADER bl, int error, BOOL callback) { ...@@ -639,11 +639,11 @@ static void brt_loader_set_panic(BRTLOADER bl, int error, BOOL callback) {
// One of the tests uses this. // One of the tests uses this.
FILE *toku_bl_fidx2file (BRTLOADER bl, FIDX i) { FILE *toku_bl_fidx2file (BRTLOADER bl, FIDX i) {
{ int r2 = toku_pthread_mutex_lock(&bl->file_infos.lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_lock(&bl->file_infos.lock); resource_assert_zero(r2); }
invariant(i.idx >=0 && i.idx < bl->file_infos.n_files); invariant(i.idx >=0 && i.idx < bl->file_infos.n_files);
invariant(bl->file_infos.file_infos[i.idx].is_open); invariant(bl->file_infos.file_infos[i.idx].is_open);
FILE *result=bl->file_infos.file_infos[i.idx].file; FILE *result=bl->file_infos.file_infos[i.idx].file;
{ int r2 = toku_pthread_mutex_unlock(&bl->file_infos.lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_unlock(&bl->file_infos.lock); resource_assert_zero(r2); }
return result; return result;
} }
...@@ -780,9 +780,9 @@ int loader_write_row(DBT *key, DBT *val, FIDX data, FILE *dataf, u_int64_t *data ...@@ -780,9 +780,9 @@ int loader_write_row(DBT *key, DBT *val, FIDX data, FILE *dataf, u_int64_t *data
// we have a chance to handle the errors because when we close we can delete all the files. // we have a chance to handle the errors because when we close we can delete all the files.
if ((r=bl_write_dbt(key, dataf, dataoff, bl))) return r; if ((r=bl_write_dbt(key, dataf, dataoff, bl))) return r;
if ((r=bl_write_dbt(val, dataf, dataoff, bl))) return r; if ((r=bl_write_dbt(val, dataf, dataoff, bl))) return r;
{ int r2 = toku_pthread_mutex_lock(&bl->file_infos.lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_lock(&bl->file_infos.lock); resource_assert_zero(r2); }
bl->file_infos.file_infos[data.idx].n_rows++; bl->file_infos.file_infos[data.idx].n_rows++;
{ int r2 = toku_pthread_mutex_unlock(&bl->file_infos.lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_unlock(&bl->file_infos.lock); resource_assert_zero(r2); }
return 0; return 0;
} }
...@@ -1006,7 +1006,7 @@ static void enqueue_for_extraction (BRTLOADER bl) { ...@@ -1006,7 +1006,7 @@ static void enqueue_for_extraction (BRTLOADER bl) {
*enqueue_me = bl->primary_rowset; *enqueue_me = bl->primary_rowset;
zero_rowset(&bl->primary_rowset); zero_rowset(&bl->primary_rowset);
int r = queue_enq(bl->primary_rowset_queue, (void*)enqueue_me, 1, NULL); int r = queue_enq(bl->primary_rowset_queue, (void*)enqueue_me, 1, NULL);
resource_assert(r==0); resource_assert_zero(r);
} }
static int loader_do_put(BRTLOADER bl, static int loader_do_put(BRTLOADER bl,
...@@ -1050,7 +1050,8 @@ static int finish_extractor (BRTLOADER bl) { ...@@ -1050,7 +1050,8 @@ static int finish_extractor (BRTLOADER bl) {
{ {
void *toku_pthread_retval; void *toku_pthread_retval;
int r = toku_pthread_join(bl->extractor_thread, &toku_pthread_retval); int r = toku_pthread_join(bl->extractor_thread, &toku_pthread_retval);
resource_assert(r==0 && toku_pthread_retval==NULL); resource_assert_zero(r);
invariant(toku_pthread_retval == NULL);
bl->extractor_live = FALSE; bl->extractor_live = FALSE;
BL_TRACE(blt_join_on_extractor); BL_TRACE(blt_join_on_extractor);
} }
...@@ -1485,7 +1486,7 @@ static int update_progress (int N, ...@@ -1485,7 +1486,7 @@ static int update_progress (int N,
{ {
// Need a lock here because of cilk and also the various pthreads. // Need a lock here because of cilk and also the various pthreads.
// Must protect the increment and the call to the poll_function. // Must protect the increment and the call to the poll_function.
{ int r = toku_pthread_mutex_lock(&update_progress_lock); resource_assert(r == 0); } { int r = toku_pthread_mutex_lock(&update_progress_lock); resource_assert_zero(r); }
bl->progress+=N; bl->progress+=N;
int result; int result;
...@@ -1498,7 +1499,7 @@ static int update_progress (int N, ...@@ -1498,7 +1499,7 @@ static int update_progress (int N,
} else { } else {
result = bl->progress_callback_result; result = bl->progress_callback_result;
} }
{ int r = toku_pthread_mutex_unlock(&update_progress_lock); resource_assert(r == 0); } { int r = toku_pthread_mutex_unlock(&update_progress_lock); resource_assert_zero(r); }
return result; return result;
} }
...@@ -1663,9 +1664,9 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q ...@@ -1663,9 +1664,9 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
} }
dataoff[i] = 0; dataoff[i] = 0;
{ int r2 = toku_pthread_mutex_lock(&bl->file_infos.lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_lock(&bl->file_infos.lock); resource_assert_zero(r2); }
n_rows += bl->file_infos.file_infos[srcs_fidxs[i].idx].n_rows; n_rows += bl->file_infos.file_infos[srcs_fidxs[i].idx].n_rows;
{ int r2 = toku_pthread_mutex_unlock(&bl->file_infos.lock); resource_assert(r2==0); } { int r2 = toku_pthread_mutex_unlock(&bl->file_infos.lock); resource_assert_zero(r2); }
} }
} }
u_int64_t n_rows_done = 0; u_int64_t n_rows_done = 0;
...@@ -1740,7 +1741,6 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q ...@@ -1740,7 +1741,6 @@ int toku_merge_some_files_using_dbufio (const BOOL to_q, FIDX dest_data, QUEUE q
} else { } else {
fprintf(stderr, "%s:%d r=%d errno=%d bfs=%p mini=%d\n", __FILE__, __LINE__, r, errno, bfs, mini); fprintf(stderr, "%s:%d r=%d errno=%d bfs=%p mini=%d\n", __FILE__, __LINE__, r, errno, bfs, mini);
dbufio_print(bfs); dbufio_print(bfs);
// lazy_assert(0);
result = r; result = r;
break; break;
} }
...@@ -2602,7 +2602,7 @@ static int loader_do_i (BRTLOADER bl, ...@@ -2602,7 +2602,7 @@ static int loader_do_i (BRTLOADER bl,
int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval); int r2 = toku_pthread_join(bl->fractal_threads[which_db], &toku_pthread_retval);
invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live here. A previous bug but that struct into a C block statement. invariant(fta.bl==bl); // this is a gratuitous assertion to make sure that the fta struct is still live here. A previous bug but that struct into a C block statement.
BL_TRACE(blt_join_on_fractal); BL_TRACE(blt_join_on_fractal);
resource_assert(r2==0); resource_assert_zero(r2);
invariant(toku_pthread_retval==NULL); invariant(toku_pthread_retval==NULL);
invariant(bl->fractal_threads_live[which_db]); invariant(bl->fractal_threads_live[which_db]);
bl->fractal_threads_live[which_db] = FALSE; bl->fractal_threads_live[which_db] = FALSE;
......
...@@ -163,24 +163,24 @@ struct cachetable { ...@@ -163,24 +163,24 @@ struct cachetable {
// Lock the cachetable // Lock the cachetable
static inline void cachefiles_lock(CACHETABLE ct) { static inline void cachefiles_lock(CACHETABLE ct) {
int r = toku_pthread_mutex_lock(&ct->cachefiles_mutex); assert(r == 0); int r = toku_pthread_mutex_lock(&ct->cachefiles_mutex); resource_assert_zero(r);
} }
// Unlock the cachetable // Unlock the cachetable
static inline void cachefiles_unlock(CACHETABLE ct) { static inline void cachefiles_unlock(CACHETABLE ct) {
int r = toku_pthread_mutex_unlock(&ct->cachefiles_mutex); assert(r == 0); int r = toku_pthread_mutex_unlock(&ct->cachefiles_mutex); resource_assert_zero(r);
} }
// Lock the cachetable // Lock the cachetable
static inline void cachetable_lock(CACHETABLE ct __attribute__((unused))) { static inline void cachetable_lock(CACHETABLE ct __attribute__((unused))) {
int r = toku_pthread_mutex_lock(ct->mutex); assert(r == 0); int r = toku_pthread_mutex_lock(ct->mutex); resource_assert_zero(r);;
cachetable_lock_taken++; cachetable_lock_taken++;
} }
// Unlock the cachetable // Unlock the cachetable
static inline void cachetable_unlock(CACHETABLE ct __attribute__((unused))) { static inline void cachetable_unlock(CACHETABLE ct __attribute__((unused))) {
cachetable_lock_released++; cachetable_lock_released++;
int r = toku_pthread_mutex_unlock(ct->mutex); assert(r == 0); int r = toku_pthread_mutex_unlock(ct->mutex); resource_assert_zero(r);
} }
// Wait for cache table space to become available // Wait for cache table space to become available
...@@ -282,8 +282,8 @@ int toku_create_cachetable(CACHETABLE *result, long size_limit, LSN UU(initial_l ...@@ -282,8 +282,8 @@ int toku_create_cachetable(CACHETABLE *result, long size_limit, LSN UU(initial_l
ct->logger = logger; ct->logger = logger;
toku_init_workers(&ct->wq, &ct->threadpool); toku_init_workers(&ct->wq, &ct->threadpool);
ct->mutex = workqueue_lock_ref(&ct->wq); ct->mutex = workqueue_lock_ref(&ct->wq);
int r = toku_pthread_mutex_init(&ct->openfd_mutex, NULL); assert(r == 0); int r = toku_pthread_mutex_init(&ct->openfd_mutex, NULL); resource_assert_zero(r);
r = toku_pthread_mutex_init(&ct->cachefiles_mutex, 0); assert(r == 0); r = toku_pthread_mutex_init(&ct->cachefiles_mutex, 0); resource_assert_zero(r);
toku_minicron_setup(&ct->checkpointer, 0, checkpoint_thread, ct); // default is no checkpointing toku_minicron_setup(&ct->checkpointer, 0, checkpoint_thread, ct); // default is no checkpointing
r = toku_leaflock_create(&ct->leaflock_pool); assert(r==0); r = toku_leaflock_create(&ct->leaflock_pool); assert(r==0);
r = toku_omt_create(&ct->reserved_filenums); assert(r==0); r = toku_omt_create(&ct->reserved_filenums); assert(r==0);
...@@ -354,8 +354,7 @@ restart: ...@@ -354,8 +354,7 @@ restart:
//Cachefile is closing, wait till finished. //Cachefile is closing, wait till finished.
assert(extant->closefd_waiting==0); //Single client thread (any more and this needs to be re-analyzed). assert(extant->closefd_waiting==0); //Single client thread (any more and this needs to be re-analyzed).
extant->closefd_waiting++; extant->closefd_waiting++;
int rwait = toku_pthread_cond_wait(&extant->closefd_wait, ct->mutex); int rwait = toku_pthread_cond_wait(&extant->closefd_wait, ct->mutex); resource_assert_zero(rwait);
assert(rwait == 0);
restarted = TRUE; restarted = TRUE;
goto restart; //Restart and verify that it is not found in the second loop. goto restart; //Restart and verify that it is not found in the second loop.
} }
...@@ -534,7 +533,7 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd ...@@ -534,7 +533,7 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd
return r; return r;
} }
r = toku_pthread_mutex_lock(&ct->openfd_mutex); // purpose is to make this function single-threaded r = toku_pthread_mutex_lock(&ct->openfd_mutex); // purpose is to make this function single-threaded
assert(r==0); resource_assert_zero(r);
cachetable_lock(ct); cachetable_lock(ct);
cachefiles_lock(ct); cachefiles_lock(ct);
for (extant = ct->cachefiles; extant; extant=extant->next) { for (extant = ct->cachefiles; extant; extant=extant->next) {
...@@ -544,8 +543,8 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd ...@@ -544,8 +543,8 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd
if (extant->is_closing) { if (extant->is_closing) {
// if another thread is closing this file, wait until the close is fully complete // if another thread is closing this file, wait until the close is fully complete
cachefiles_unlock(ct); //Cannot hold cachefiles lock over the cond_wait cachefiles_unlock(ct); //Cannot hold cachefiles lock over the cond_wait
r = toku_pthread_cond_wait(&extant->openfd_wait, ct->mutex); r = toku_pthread_cond_wait(&extant->openfd_wait, ct->mutex);
assert(r == 0); resource_assert_zero(r);
cachefiles_lock(ct); cachefiles_lock(ct);
goto try_again; // other thread has closed this file, go create a new cachefile goto try_again; // other thread has closed this file, go create a new cachefile
} }
...@@ -608,8 +607,8 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd ...@@ -608,8 +607,8 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd
newcf->for_local_checkpoint = ZERO_LSN; newcf->for_local_checkpoint = ZERO_LSN;
newcf->checkpoint_state = CS_NOT_IN_PROGRESS; newcf->checkpoint_state = CS_NOT_IN_PROGRESS;
r = toku_pthread_cond_init(&newcf->openfd_wait, NULL); assert(r == 0); r = toku_pthread_cond_init(&newcf->openfd_wait, NULL); resource_assert_zero(r);
r = toku_pthread_cond_init(&newcf->closefd_wait, NULL); assert(r == 0); r = toku_pthread_cond_init(&newcf->closefd_wait, NULL); resource_assert_zero(r);
*cfptr = newcf; *cfptr = newcf;
r = 0; r = 0;
} }
...@@ -617,7 +616,7 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd ...@@ -617,7 +616,7 @@ int toku_cachetable_openfd_with_filenum (CACHEFILE *cfptr, CACHETABLE ct, int fd
cachefiles_unlock(ct); cachefiles_unlock(ct);
{ {
int rm = toku_pthread_mutex_unlock(&ct->openfd_mutex); int rm = toku_pthread_mutex_unlock(&ct->openfd_mutex);
assert (rm == 0); resource_assert_zero(rm);
} }
cachetable_unlock(ct); cachetable_unlock(ct);
return r; return r;
...@@ -808,7 +807,7 @@ int toku_cachefile_close (CACHEFILE *cfp, char **error_string, BOOL oplsn_valid, ...@@ -808,7 +807,7 @@ int toku_cachefile_close (CACHEFILE *cfp, char **error_string, BOOL oplsn_valid,
assert(cf->refcount == 1); // toku_cachetable_openfd() is single-threaded assert(cf->refcount == 1); // toku_cachetable_openfd() is single-threaded
assert(!cf->next_in_checkpoint); //checkpoint cannot run on a closing file assert(!cf->next_in_checkpoint); //checkpoint cannot run on a closing file
assert(!cf->for_checkpoint); //checkpoint cannot run on a closing file assert(!cf->for_checkpoint); //checkpoint cannot run on a closing file
rs = toku_pthread_cond_signal(&cf->openfd_wait); assert(rs == 0); rs = toku_pthread_cond_signal(&cf->openfd_wait); resource_assert_zero(rs);
} }
if (cf->closefd_waiting > 0) { if (cf->closefd_waiting > 0) {
int rs; int rs;
...@@ -819,9 +818,9 @@ int toku_cachefile_close (CACHEFILE *cfp, char **error_string, BOOL oplsn_valid, ...@@ -819,9 +818,9 @@ int toku_cachefile_close (CACHEFILE *cfp, char **error_string, BOOL oplsn_valid,
{ {
int rd; int rd;
rd = toku_pthread_cond_destroy(&cf->openfd_wait); rd = toku_pthread_cond_destroy(&cf->openfd_wait);
assert(rd == 0); resource_assert_zero(rd);
rd = toku_pthread_cond_destroy(&cf->closefd_wait); rd = toku_pthread_cond_destroy(&cf->closefd_wait);
assert(rd == 0); resource_assert_zero(rd);
} }
if (cf->fname_in_env) toku_free(cf->fname_in_env); if (cf->fname_in_env) toku_free(cf->fname_in_env);
...@@ -860,20 +859,20 @@ int toku_cachefile_close (CACHEFILE *cfp, char **error_string, BOOL oplsn_valid, ...@@ -860,20 +859,20 @@ int toku_cachefile_close (CACHEFILE *cfp, char **error_string, BOOL oplsn_valid,
if (cf->refcount > 0) { if (cf->refcount > 0) {
int rs; int rs;
assert(cf->refcount == 1); // toku_cachetable_openfd() is single-threaded assert(cf->refcount == 1); // toku_cachetable_openfd() is single-threaded
rs = toku_pthread_cond_signal(&cf->openfd_wait); assert(rs == 0); rs = toku_pthread_cond_signal(&cf->openfd_wait); resource_assert_zero(rs);
} }
if (cf->closefd_waiting > 0) { if (cf->closefd_waiting > 0) {
int rs; int rs;
assert(cf->closefd_waiting == 1); assert(cf->closefd_waiting == 1);
rs = toku_pthread_cond_signal(&cf->closefd_wait); assert(rs == 0); rs = toku_pthread_cond_signal(&cf->closefd_wait); resource_assert_zero(rs);
} }
// we can destroy the condition variables because if there was another thread waiting, it was already signalled // we can destroy the condition variables because if there was another thread waiting, it was already signalled
{ {
int rd; int rd;
rd = toku_pthread_cond_destroy(&cf->openfd_wait); rd = toku_pthread_cond_destroy(&cf->openfd_wait);
assert(rd == 0); resource_assert_zero(rd);
rd = toku_pthread_cond_destroy(&cf->closefd_wait); rd = toku_pthread_cond_destroy(&cf->closefd_wait);
assert(rd == 0); resource_assert_zero(rd);
} }
rwlock_write_lock(&cf->fdlock, ct->mutex); //Just make sure we can get it. rwlock_write_lock(&cf->fdlock, ct->mutex); //Just make sure we can get it.
cachetable_unlock(ct); cachetable_unlock(ct);
...@@ -1869,12 +1868,12 @@ toku_cachetable_close (CACHETABLE *ctp) { ...@@ -1869,12 +1868,12 @@ toku_cachetable_close (CACHETABLE *ctp) {
} }
assert(ct->size_writing == 0); assert(ct->size_writing == 0);
rwlock_destroy(&ct->pending_lock); rwlock_destroy(&ct->pending_lock);
r = toku_pthread_mutex_destroy(&ct->openfd_mutex); assert(r == 0); r = toku_pthread_mutex_destroy(&ct->openfd_mutex); resource_assert_zero(r);
cachetable_unlock(ct); cachetable_unlock(ct);
toku_destroy_workers(&ct->wq, &ct->threadpool); toku_destroy_workers(&ct->wq, &ct->threadpool);
r = toku_leaflock_destroy(&ct->leaflock_pool); assert(r==0); r = toku_leaflock_destroy(&ct->leaflock_pool); assert_zero(r);
toku_omt_destroy(&ct->reserved_filenums); toku_omt_destroy(&ct->reserved_filenums);
r = toku_pthread_mutex_destroy(&ct->cachefiles_mutex); assert(r == 0); r = toku_pthread_mutex_destroy(&ct->cachefiles_mutex); resource_assert_zero(r);
toku_free(ct->table); toku_free(ct->table);
toku_free(ct->env_dir); toku_free(ct->env_dir);
toku_free(ct); toku_free(ct);
......
...@@ -50,8 +50,8 @@ toku_thread_create(struct toku_thread_pool *pool, struct toku_thread **toku_thre ...@@ -50,8 +50,8 @@ toku_thread_create(struct toku_thread_pool *pool, struct toku_thread **toku_thre
} else { } else {
memset(thread, 0, sizeof *thread); memset(thread, 0, sizeof *thread);
thread->pool = pool; thread->pool = pool;
r = toku_pthread_cond_init(&thread->wait, NULL); invariant(r == 0); r = toku_pthread_cond_init(&thread->wait, NULL); resource_assert_zero(r);
r = toku_pthread_create(&thread->tid, NULL, toku_thread_run_internal, thread); invariant(r == 0); r = toku_pthread_create(&thread->tid, NULL, toku_thread_run_internal, thread); resource_assert_zero(r);
*toku_thread_return = thread; *toku_thread_return = thread;
} }
return r; return r;
...@@ -64,7 +64,7 @@ toku_thread_run(struct toku_thread *thread, void *(*f)(void *arg), void *arg) { ...@@ -64,7 +64,7 @@ toku_thread_run(struct toku_thread *thread, void *(*f)(void *arg), void *arg) {
thread->f = f; thread->f = f;
thread->arg = arg; thread->arg = arg;
toku_thread_pool_unlock(thread->pool); toku_thread_pool_unlock(thread->pool);
r = toku_pthread_cond_signal(&thread->wait); invariant(r == 0); r = toku_pthread_cond_signal(&thread->wait); resource_assert_zero(r);
} }
static void static void
...@@ -76,14 +76,14 @@ toku_thread_destroy(struct toku_thread *thread) { ...@@ -76,14 +76,14 @@ toku_thread_destroy(struct toku_thread *thread) {
toku_thread_pool_lock(pool); toku_thread_pool_lock(pool);
toku_list_remove(&thread->free_link); toku_list_remove(&thread->free_link);
toku_thread_pool_unlock(pool); toku_thread_pool_unlock(pool);
r = toku_pthread_cond_destroy(&thread->wait); invariant(r == 0); r = toku_pthread_cond_destroy(&thread->wait); resource_assert_zero(r);
toku_free(thread); toku_free(thread);
} }
static void static void
toku_thread_ask_exit(struct toku_thread *thread) { toku_thread_ask_exit(struct toku_thread *thread) {
thread->doexit = 1; thread->doexit = 1;
int r = toku_pthread_cond_signal(&thread->wait); invariant(r == 0); int r = toku_pthread_cond_signal(&thread->wait); resource_assert_zero(r);
} }
static void * static void *
...@@ -93,13 +93,13 @@ toku_thread_run_internal(void *arg) { ...@@ -93,13 +93,13 @@ toku_thread_run_internal(void *arg) {
int r; int r;
toku_thread_pool_lock(pool); toku_thread_pool_lock(pool);
while (1) { while (1) {
r = toku_pthread_cond_signal(&pool->wait_free); invariant(r == 0); r = toku_pthread_cond_signal(&pool->wait_free); resource_assert_zero(r);
void *(*thread_f)(void *); void *thread_arg; int doexit; void *(*thread_f)(void *); void *thread_arg; int doexit;
while (1) { while (1) {
thread_f = thread->f; thread_arg = thread->arg; doexit = thread->doexit; // make copies of these variables to make helgrind happy thread_f = thread->f; thread_arg = thread->arg; doexit = thread->doexit; // make copies of these variables to make helgrind happy
if (thread_f || doexit) if (thread_f || doexit)
break; break;
r = toku_pthread_cond_wait(&thread->wait, &pool->lock); invariant(r == 0); r = toku_pthread_cond_wait(&thread->wait, &pool->lock); resource_assert_zero(r);
} }
toku_thread_pool_unlock(pool); toku_thread_pool_unlock(pool);
if (thread_f) if (thread_f)
...@@ -121,10 +121,10 @@ toku_thread_pool_create(struct toku_thread_pool **pool_return, int max_threads) ...@@ -121,10 +121,10 @@ toku_thread_pool_create(struct toku_thread_pool **pool_return, int max_threads)
r = errno; r = errno;
} else { } else {
memset(pool, 0, sizeof *pool); memset(pool, 0, sizeof *pool);
r = toku_pthread_mutex_init(&pool->lock, NULL); invariant(r == 0); r = toku_pthread_mutex_init(&pool->lock, NULL); resource_assert_zero(r);
toku_list_init(&pool->free_threads); toku_list_init(&pool->free_threads);
toku_list_init(&pool->all_threads); toku_list_init(&pool->all_threads);
r = toku_pthread_cond_init(&pool->wait_free, NULL); invariant(r == 0); r = toku_pthread_cond_init(&pool->wait_free, NULL); resource_assert_zero(r);
pool->cur_threads = 0; pool->cur_threads = 0;
pool->max_threads = max_threads; pool->max_threads = max_threads;
*pool_return = pool; *pool_return = pool;
...@@ -135,12 +135,12 @@ toku_thread_pool_create(struct toku_thread_pool **pool_return, int max_threads) ...@@ -135,12 +135,12 @@ toku_thread_pool_create(struct toku_thread_pool **pool_return, int max_threads)
static void static void
toku_thread_pool_lock(struct toku_thread_pool *pool) { toku_thread_pool_lock(struct toku_thread_pool *pool) {
int r = toku_pthread_mutex_lock(&pool->lock); invariant(r == 0); int r = toku_pthread_mutex_lock(&pool->lock); resource_assert_zero(r);
} }
static void static void
toku_thread_pool_unlock(struct toku_thread_pool *pool) { toku_thread_pool_unlock(struct toku_thread_pool *pool) {
int r = toku_pthread_mutex_unlock(&pool->lock); invariant(r == 0); int r = toku_pthread_mutex_unlock(&pool->lock); resource_assert_zero(r);
} }
void void
...@@ -169,8 +169,8 @@ toku_thread_pool_destroy(struct toku_thread_pool **poolptr) { ...@@ -169,8 +169,8 @@ toku_thread_pool_destroy(struct toku_thread_pool **poolptr) {
// cleanup // cleanup
int r; int r;
r = toku_pthread_cond_destroy(&pool->wait_free); invariant(r == 0); r = toku_pthread_cond_destroy(&pool->wait_free); resource_assert_zero(r);
r = toku_pthread_mutex_destroy(&pool->lock); invariant(r == 0); r = toku_pthread_mutex_destroy(&pool->lock); resource_assert_zero(r);
toku_free(pool); toku_free(pool);
} }
...@@ -183,7 +183,7 @@ toku_thread_pool_add(struct toku_thread_pool *pool) { ...@@ -183,7 +183,7 @@ toku_thread_pool_add(struct toku_thread_pool *pool) {
pool->cur_threads += 1; pool->cur_threads += 1;
toku_list_push(&pool->all_threads, &thread->all_link); toku_list_push(&pool->all_threads, &thread->all_link);
toku_list_push(&pool->free_threads, &thread->free_link); toku_list_push(&pool->free_threads, &thread->free_link);
r = toku_pthread_cond_signal(&pool->wait_free); invariant(r == 0); r = toku_pthread_cond_signal(&pool->wait_free); resource_assert_zero(r);
} }
return r; return r;
} }
...@@ -204,7 +204,7 @@ toku_thread_pool_get_one(struct toku_thread_pool *pool, int dowait, struct toku_ ...@@ -204,7 +204,7 @@ toku_thread_pool_get_one(struct toku_thread_pool *pool, int dowait, struct toku_
break; break;
} }
pool->get_blocks++; pool->get_blocks++;
r = toku_pthread_cond_wait(&pool->wait_free, &pool->lock); invariant(r == 0); r = toku_pthread_cond_wait(&pool->wait_free, &pool->lock); resource_assert_zero(r);
} }
if (r == 0) { if (r == 0) {
struct toku_list *list = toku_list_pop_head(&pool->free_threads); struct toku_list *list = toku_list_pop_head(&pool->free_threads);
......
...@@ -28,28 +28,28 @@ struct workset { ...@@ -28,28 +28,28 @@ struct workset {
static inline void static inline void
workset_init(struct workset *ws) { workset_init(struct workset *ws) {
int r; int r;
r = toku_pthread_mutex_init(&ws->lock, NULL); invariant(r == 0); r = toku_pthread_mutex_init(&ws->lock, NULL); resource_assert_zero(r);
toku_list_init(&ws->worklist); toku_list_init(&ws->worklist);
ws->refs = 1; // the calling thread gets a reference ws->refs = 1; // the calling thread gets a reference
r = toku_pthread_cond_init(&ws->worker_wait, NULL); invariant(r == 0); r = toku_pthread_cond_init(&ws->worker_wait, NULL); resource_assert_zero(r);
} }
static inline void static inline void
workset_destroy(struct workset *ws) { workset_destroy(struct workset *ws) {
invariant(toku_list_empty(&ws->worklist)); invariant(toku_list_empty(&ws->worklist));
int r; int r;
r = toku_pthread_cond_destroy(&ws->worker_wait); invariant(r == 0); r = toku_pthread_cond_destroy(&ws->worker_wait); resource_assert_zero(r);
r = toku_pthread_mutex_destroy(&ws->lock); invariant(r == 0); r = toku_pthread_mutex_destroy(&ws->lock); resource_assert_zero(r);
} }
static inline void static inline void
workset_lock(struct workset *ws) { workset_lock(struct workset *ws) {
int r = toku_pthread_mutex_lock(&ws->lock); invariant(r == 0); int r = toku_pthread_mutex_lock(&ws->lock); resource_assert_zero(r);
} }
static inline void static inline void
workset_unlock(struct workset *ws) { workset_unlock(struct workset *ws) {
int r = toku_pthread_mutex_unlock(&ws->lock); invariant(r == 0); int r = toku_pthread_mutex_unlock(&ws->lock); resource_assert_zero(r);
} }
// Put work in the workset. Assume the workset is already locked. // Put work in the workset. Assume the workset is already locked.
...@@ -92,7 +92,7 @@ static inline void ...@@ -92,7 +92,7 @@ static inline void
workset_release_ref(struct workset *ws) { workset_release_ref(struct workset *ws) {
workset_lock(ws); workset_lock(ws);
if (--ws->refs == 0) { if (--ws->refs == 0) {
int r = toku_pthread_cond_broadcast(&ws->worker_wait); invariant(r == 0); int r = toku_pthread_cond_broadcast(&ws->worker_wait); resource_assert_zero(r);
} }
workset_unlock(ws); workset_unlock(ws);
} }
...@@ -102,7 +102,7 @@ static inline void ...@@ -102,7 +102,7 @@ static inline void
workset_join(struct workset *ws) { workset_join(struct workset *ws) {
workset_lock(ws); workset_lock(ws);
while (ws->refs != 0) { while (ws->refs != 0) {
int r = toku_pthread_cond_wait(&ws->worker_wait, &ws->lock); invariant(r == 0); int r = toku_pthread_cond_wait(&ws->worker_wait, &ws->lock); resource_assert_zero(r);
} }
workset_unlock(ws); workset_unlock(ws);
} }
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
/* This version will complain if NDEBUG is set. */ /* This version will complain if NDEBUG is set. */
/* It evaluates the argument and then calls a function toku_do_assert() which takes all the hits for the branches not taken. */ /* It evaluates the argument and then calls a function toku_do_assert() which takes all the hits for the branches not taken. */
#include <stdint.h>
#include "c_dialects.h" #include "c_dialects.h"
#include "errno.h" #include "errno.h"
...@@ -13,8 +14,10 @@ C_BEGIN ...@@ -13,8 +14,10 @@ C_BEGIN
#error NDEBUG should not be set #error NDEBUG should not be set
#endif #endif
void toku_do_assert(int /*expr*/,const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default")));
void toku_do_assert_fail(const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__)); void toku_do_assert_fail(const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__));
void toku_do_assert(int,const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))); void toku_do_assert_zero_fail(uintptr_t/*expr*/, const char*/*expr_as_string*/,const char */*fun*/,const char*/*file*/,int/*line*/, int/*errno*/) __attribute__((__visibility__("default"))) __attribute__((__noreturn__));
// Define GCOV if you want to get test-coverage information that ignores the assert statements. // Define GCOV if you want to get test-coverage information that ignores the assert statements.
// #define GCOV // #define GCOV
...@@ -22,9 +25,11 @@ void toku_do_assert(int,const char*/*expr_as_string*/,const char */*fun*/,const ...@@ -22,9 +25,11 @@ void toku_do_assert(int,const char*/*expr_as_string*/,const char */*fun*/,const
extern void (*do_assert_hook)(void); // Set this to a function you want called after printing the assertion failure message but before calling abort(). By default this is NULL. extern void (*do_assert_hook)(void); // Set this to a function you want called after printing the assertion failure message but before calling abort(). By default this is NULL.
#if defined(GCOV) || TOKU_WINDOWS #if defined(GCOV) || TOKU_WINDOWS
#define assert(expr) toku_do_assert((expr) != 0, #expr, __FUNCTION__, __FILE__, __LINE__, errno) #define assert(expr) toku_do_assert((expr) != 0, #expr, __FUNCTION__, __FILE__, __LINE__, errno)
#define assert_zero(expr) toku_do_assert((expr) == 0, #expr, __FUNCTION__, __FILE__, __LINE__, errno)
#else #else
#define assert(expr) ((expr) ? (void)0 : toku_do_assert_fail(#expr, __FUNCTION__, __FILE__, __LINE__, errno)) #define assert(expr) ((expr) ? (void)0 : toku_do_assert_fail(#expr, __FUNCTION__, __FILE__, __LINE__, errno))
#define assert_zero(expr) ((expr) == 0 ? (void)0 : toku_do_assert_zero_fail((uintptr_t)(expr), #expr, __FUNCTION__, __FILE__, __LINE__, errno))
#endif #endif
#ifdef GCOV #ifdef GCOV
...@@ -35,9 +40,12 @@ extern void (*do_assert_hook)(void); // Set this to a function you want called a ...@@ -35,9 +40,12 @@ extern void (*do_assert_hook)(void); // Set this to a function you want called a
#define WHEN_NOT_GCOV(x) x #define WHEN_NOT_GCOV(x) x
#endif #endif
#define lazy_assert(a) assert(a) // indicates code is incomplete #define lazy_assert(a) assert(a) // indicates code is incomplete
#define invariant(a) assert(a) // indicates a code invariant that must be true #define lazy_assert_zero(a) assert_zero(a) // indicates code is incomplete
#define resource_assert(a) assert(a) // indicates resource must be available, otherwise unrecoverable #define invariant(a) assert(a) // indicates a code invariant that must be true
#define invariant_zero(a) assert_zero(a) // indicates a code invariant that must be true
#define resource_assert(a) assert(a) // indicates resource must be available, otherwise unrecoverable
#define resource_assert_zero(a) assert_zero(a) // indicates resource must be available, otherwise unrecoverable
C_END C_END
......
...@@ -19,42 +19,56 @@ static void *backtrace_pointers[N_POINTERS]; ...@@ -19,42 +19,56 @@ static void *backtrace_pointers[N_POINTERS];
void (*do_assert_hook)(void) = NULL; void (*do_assert_hook)(void) = NULL;
void toku_do_assert_fail (const char* expr_as_string,const char *function,const char*file,int line, int caller_errno) static void toku_do_backtrace_abort(void) __attribute__((noreturn));
{
fprintf(stderr, "%s:%d %s: Assertion `%s' failed (errno=%d)\n", file,line,function,expr_as_string, caller_errno);
// backtrace static void
toku_do_backtrace_abort(void) {
// backtrace
#if !TOKU_WINDOWS #if !TOKU_WINDOWS
int n = backtrace(backtrace_pointers, N_POINTERS); int n = backtrace(backtrace_pointers, N_POINTERS);
fprintf(stderr, "Backtrace: (Note: toku_do_assert=0x%p)\n", toku_do_assert); fflush(stderr); fprintf(stderr, "Backtrace: (Note: toku_do_assert=0x%p)\n", toku_do_assert); fflush(stderr);
backtrace_symbols_fd(backtrace_pointers, n, fileno(stderr)); backtrace_symbols_fd(backtrace_pointers, n, fileno(stderr));
#endif #endif
fflush(stderr); fflush(stderr);
#if TOKU_WINDOWS #if TOKU_WINDOWS
//Following commented methods will not always end the process (could hang). //Following commented methods will not always end the process (could hang).
//They could be unacceptable for other reasons as well (popups, //They could be unacceptable for other reasons as well (popups,
//flush buffers before quitting, etc) //flush buffers before quitting, etc)
// abort() // abort()
// assert(FALSE) (assert.h assert) // assert(FALSE) (assert.h assert)
// raise(SIGABRT) // raise(SIGABRT)
// divide by 0 // divide by 0
// null dereference // null dereference
// _exit // _exit
// exit // exit
// ExitProcess // ExitProcess
TerminateProcess(GetCurrentProcess(), 134); //Only way found so far to unconditionally TerminateProcess(GetCurrentProcess(), 134); //Only way found so far to unconditionally
//Terminate the process //Terminate the process
#endif #endif
if (do_assert_hook) do_assert_hook(); if (do_assert_hook) do_assert_hook();
abort();
}
void
toku_do_assert_fail (const char *expr_as_string, const char *function, const char *file, int line, int caller_errno) {
fprintf(stderr, "%s:%d %s: Assertion `%s' failed (errno=%d)\n", file, line, function, expr_as_string, caller_errno);
toku_do_backtrace_abort();
}
abort(); void
toku_do_assert_zero_fail (uintptr_t expr, const char *expr_as_string, const char *function, const char *file, int line, int caller_errno) {
fprintf(stderr, "%s:%d %s: Assertion `%s == 0' failed (errno=%d) (%s=%"PRIuPTR")\n", file, line, function, expr_as_string, caller_errno, expr_as_string, expr);
toku_do_backtrace_abort();
} }
void toku_do_assert(int expr,const char* expr_as_string,const char *function,const char*file,int line, int caller_errno) { void
if (expr==0) { toku_do_assert(int expr, const char *expr_as_string, const char *function, const char* file, int line, int caller_errno) {
toku_do_assert_fail(expr_as_string, function, file, line, caller_errno); if (expr == 0)
} toku_do_assert_fail(expr_as_string, function, file, line, caller_errno);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment