Commit 19973590 authored by Zardosht Kasheff, committed by Yoni Fogel

[t:3651], merge to main

git-svn-id: file:///svn/toku/tokudb@32620 c7de825b-a66e-492c-adef-691d508d4ae1
parent f1c9f9dc
@@ -177,6 +177,10 @@ struct brtnode_partition {
     // a struct brtnode_leaf_basement_node for leaf nodes
     //
     void* ptr;
+    // clock count used by pe_callback to determine whether a partition should be evicted
+    // for now, the count saturates at 1
+    u_int8_t clock_count;
 };
 // brtnode partition macros
@@ -187,6 +191,19 @@ struct brtnode_partition {
 #define BP_OFFSET(node,i) ((node)->bp[i].offset)
 #define BP_SUBTREE_EST(node,i) ((node)->bp[i].subtree_estimates)
+//
+// macros for managing a node's clock
+// Should be managed by brt.c, NOT by serialize/deserialize
+//
+#define BP_TOUCH_CLOCK(node, i) ((node)->bp[i].clock_count = 1)
+#define BP_SWEEP_CLOCK(node, i) ((node)->bp[i].clock_count = 0)
+#define BP_SHOULD_EVICT(node, i) ((node)->bp[i].clock_count == 0)
+// not crazy about having these two here: one is for the case where we create new
+// nodes, such as in splits and when creating new roots, and the other is for when
+// we are deserializing a node and not all bp's are touched
+#define BP_INIT_TOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 1)
+#define BP_INIT_UNTOUCHED_CLOCK(node, i) ((node)->bp[i].clock_count = 0)
 // internal node macros
 #define BNC_BUFFER(node,i) (((struct brtnode_nonleaf_childinfo*)((node)->bp[i].ptr))->buffer)
 #define BNC_NBYTESINBUF(node,i) (((struct brtnode_nonleaf_childinfo*)((node)->bp[i].ptr))->n_bytes_in_buffer)
@@ -323,6 +340,7 @@ int toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE
                                     struct brt_header *h, int n_workitems, int n_threads,
                                     BOOL for_checkpoint);
 int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash, ROLLBACK_LOG_NODE *logp, struct brt_header *h);
+void toku_deserialize_bp_from_disk(BRTNODE node, int childnum, int fd, struct brtnode_fetch_extra* bfe);
 void toku_deserialize_bp_from_compressed(BRTNODE node, int childnum);
 int toku_deserialize_brtnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, BRTNODE *brtnode, struct brtnode_fetch_extra* bfe);
 unsigned int toku_serialize_brtnode_size(BRTNODE node); /* How much space will it take? */
@@ -363,7 +381,7 @@ extern void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM n
 extern int toku_brtnode_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, u_int32_t fullhash, void **brtnode_pv, long *sizep, int *dirty, void *extraargs);
 extern int toku_brtnode_pe_callback (void *brtnode_pv, long bytes_to_free, long* bytes_freed, void *extraargs);
 extern BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs);
-extern int toku_brtnode_pf_callback(void* brtnode_pv, void* read_extraargs, long* sizep);
+int toku_brtnode_pf_callback(void* brtnode_pv, void* read_extraargs, int fd, long* sizep);
 extern int toku_brt_alloc_init_header(BRT t, TOKUTXN txn);
 extern int toku_read_brt_header_and_store_in_cachefile (CACHEFILE cf, LSN max_acceptable_lsn, struct brt_header **header, BOOL* was_open);
 extern CACHEKEY* toku_calculate_root_offset_pointer (BRT brt, u_int32_t *root_hash);
......
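The clock_count field and the BP_*_CLOCK macros added above implement a one-bit second-chance ("clock") scheme per node partition: a reader touches the bit, each partial-eviction pass sweeps it, and a partition whose bit is still clear at the next pass is considered evictable. A minimal standalone sketch of that intended behavior, with illustrative names rather than the TokuDB API:

// sketch: one-bit saturating clock, mirroring BP_TOUCH_CLOCK / BP_SWEEP_CLOCK /
// BP_SHOULD_EVICT above; `struct partition` is a stand-in, not the real type
#include <stdint.h>
#include <stdio.h>

struct partition {
    uint8_t clock_count; // saturates at 1, like brtnode_partition.clock_count
};

static void touch(struct partition *p) { p->clock_count = 1; }                // BP_TOUCH_CLOCK
static void sweep(struct partition *p) { p->clock_count = 0; }                // BP_SWEEP_CLOCK
static int  should_evict(struct partition *p) { return p->clock_count == 0; } // BP_SHOULD_EVICT

int main(void) {
    struct partition p = { .clock_count = 0 };
    touch(&p);                        // a reader used this partition
    printf("%d\n", should_evict(&p)); // 0: recently used, survives this pass
    sweep(&p);                        // an eviction pass gives it one more chance
    printf("%d\n", should_evict(&p)); // 1: untouched since the sweep, evictable
    return 0;
}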
@@ -584,6 +584,7 @@ rebalance_brtnode_leaf(BRTNODE node)
         BLB_NBYTESINBUF(node, i) = sum_info.dsum;
         BP_STATE(node,i) = PT_AVAIL;
+        BP_TOUCH_CLOCK(node,i);
     }
     // now the subtree estimates
     toku_brt_leaf_reset_calc_leaf_stats(node);
@@ -992,9 +993,11 @@ setup_brtnode_partitions(BRTNODE node, struct brtnode_fetch_extra* bfe) {
     // setup memory needed for the node
     //
     for (int i = 0; i < node->n_children; i++) {
+        BP_INIT_UNTOUCHED_CLOCK(node,i);
         BP_STATE(node,i) = toku_brtnode_partition_state(bfe, i);
         if (BP_STATE(node,i) == PT_AVAIL) {
             setup_available_brtnode_partition(node, i);
+            BP_TOUCH_CLOCK(node,i);
         }
         else if (BP_STATE(node,i) == PT_COMPRESSED) {
             node->bp[i].ptr = toku_xmalloc(sizeof(struct sub_block));
@@ -1181,6 +1184,52 @@ cleanup:
     return r;
 }

+void
+toku_deserialize_bp_from_disk(BRTNODE node, int childnum, int fd, struct brtnode_fetch_extra* bfe) {
+    assert(BP_STATE(node,childnum) == PT_ON_DISK);
+    assert(node->bp[childnum].ptr == NULL);
+    //
+    // setup the partition
+    //
+    setup_available_brtnode_partition(node, childnum);
+    BP_STATE(node,childnum) = PT_AVAIL;
+    //
+    // read off disk and make available in memory
+    //
+    // get the file offset and block size for the block
+    DISKOFF node_offset, total_node_disk_size;
+    toku_translate_blocknum_to_offset_size(
+        bfe->h->blocktable,
+        node->thisnodename,
+        &node_offset,
+        &total_node_disk_size
+        );
+    u_int32_t curr_offset = (childnum==0) ? 0 : BP_OFFSET(node,childnum-1);
+    curr_offset += node->bp_offset;
+    u_int32_t curr_size = (childnum==0) ? BP_OFFSET(node,childnum) : (BP_OFFSET(node,childnum) - BP_OFFSET(node,childnum-1));
+    struct rbuf rb = {.buf = NULL, .size = 0, .ndone = 0};
+
+    u_int8_t *XMALLOC_N(curr_size, raw_block);
+    rbuf_init(&rb, raw_block, curr_size);
+    {
+        // read the block
+        ssize_t rlen = toku_os_pread(fd, raw_block, curr_size, node_offset+curr_offset);
+        lazy_assert((DISKOFF)rlen == curr_size);
+    }
+
+    struct sub_block curr_sb;
+    sub_block_init(&curr_sb);
+
+    read_and_decompress_sub_block(&rb, &curr_sb);
+    // at this point, curr_sb.uncompressed_ptr stores the serialized node partition
+    deserialize_brtnode_partition(&curr_sb, node, childnum);
+
+    toku_free(curr_sb.uncompressed_ptr);
+    toku_free(raw_block);
+}
+
 // Take a brtnode partition that is in the compressed state, and make it avail
 void
 toku_deserialize_bp_from_compressed(BRTNODE node, int childnum) {
......
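toku_deserialize_bp_from_disk above locates a single partition inside the serialized node by treating BP_OFFSET(node, i) as the cumulative end offset of partition i within a partition area that begins node->bp_offset bytes into the node's block; the pread then targets node_offset plus that offset. A small sketch of the arithmetic, with hypothetical names and assumed semantics:

// sketch of the partition-extent arithmetic; not the TokuDB API
#include <stdint.h>
#include <assert.h>
#include <stdio.h>

// Compute where partition `childnum` lives inside the serialized node,
// given the cumulative end offsets of each partition.
static void partition_extent(const uint32_t *end_offsets, // like BP_OFFSET
                             uint32_t bp_offset,          // like node->bp_offset
                             int childnum,
                             uint32_t *offset_in_node, uint32_t *size) {
    uint32_t start = (childnum == 0) ? 0 : end_offsets[childnum - 1];
    *offset_in_node = bp_offset + start;
    *size = end_offsets[childnum] - start;
}

int main(void) {
    uint32_t ends[3] = { 100, 250, 400 }; // hypothetical partition end offsets
    uint32_t off, sz;
    partition_extent(ends, 64, 1, &off, &sz);
    assert(off == 164 && sz == 150);      // partition 1 occupies bytes [164, 314)
    printf("offset=%u size=%u\n", off, sz);
    return 0;
}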
@@ -513,6 +513,17 @@ next_dict_id(void) {
     return d;
 }

+static void
+destroy_basement_node (BASEMENTNODE bn)
+{
+    // The buffer may have been freed already, in some cases.
+    if (bn->buffer) {
+        toku_omt_destroy(&bn->buffer);
+        bn->buffer = NULL;
+    }
+}
+
 u_int8_t
 toku_brtnode_partition_state (struct brtnode_fetch_extra* bfe, int childnum)
 {
@@ -567,7 +578,6 @@ int toku_brtnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM noden
     assert(*brtnode_pv == NULL);
     struct brtnode_fetch_extra *bfe = (struct brtnode_fetch_extra *)extraargs;
     BRTNODE *result=(BRTNODE*)brtnode_pv;
-    // TODO: (Zardosht) pass in bfe to toku_deserialize_brtnode_from so it can do the right thing
     int r = toku_deserialize_brtnode_from(fd, nodename, fullhash, result, bfe);
     if (r == 0) {
         *sizep = brtnode_memory_size(*result);
@@ -579,14 +589,61 @@ int toku_brtnode_fetch_callback (CACHEFILE UU(cachefile), int fd, BLOCKNUM noden
 // callback for partially evicting a node
 int toku_brtnode_pe_callback (void *brtnode_pv, long bytes_to_free, long* bytes_freed, void* UU(extraargs)) {
     BRTNODE node = (BRTNODE)brtnode_pv;
-    assert(node);
-    *bytes_freed = 0;
+    long orig_size = brtnode_memory_size(node);
     assert(bytes_to_free > 0);
+    //
+    // nothing on internal nodes for now
+    //
+    if (node->dirty || node->height > 0) {
+        *bytes_freed = 0;
+    }
+    //
+    // partial eviction strategy for basement nodes:
+    // if the bn is compressed, evict it;
+    // otherwise, if it should be evicted, evict it, and if not, sweep its clock count
+    //
+    else {
+        for (int i = 0; i < node->n_children; i++) {
+            // Get rid of compressed stuff no matter what.
+            if (BP_STATE(node,i) == PT_COMPRESSED) {
+                struct sub_block* sb = (struct sub_block*)node->bp[i].ptr;
+                toku_free(sb->compressed_ptr);
+                toku_free(node->bp[i].ptr);
+                node->bp[i].ptr = NULL;
+                BP_STATE(node,i) = PT_ON_DISK;
+            }
+            else if (BP_STATE(node,i) == PT_AVAIL) {
+                if (BP_SHOULD_EVICT(node,i)) {
+                    // free the basement node
+                    BASEMENTNODE bn = (BASEMENTNODE)node->bp[i].ptr;
+                    OMT curr_omt = BLB_BUFFER(node, i);
+                    toku_omt_free_items(curr_omt);
+                    destroy_basement_node(bn);
+                    toku_free(node->bp[i].ptr);
+                    node->bp[i].ptr = NULL;
+                    BP_STATE(node,i) = PT_ON_DISK;
+                }
+                else {
+                    BP_SWEEP_CLOCK(node,i);
+                }
+            }
+            else if (BP_STATE(node,i) == PT_ON_DISK) {
+                continue;
+            }
+            else {
+                assert(FALSE);
+            }
+        }
+    }
+    *bytes_freed = orig_size - brtnode_memory_size(node);
     return 0;
 }

-// callback that sates if partially reading a node is necessary
+// callback that states if partially reading a node is necessary
 // could have just used toku_brtnode_fetch_callback, but wanted to separate the two cases into separate functions
 BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs) {
     // placeholder for now
@@ -599,6 +656,13 @@ BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs) {
     else if (bfe->type == brtnode_fetch_all) {
         retval = FALSE;
         for (int i = 0; i < node->n_children; i++) {
+            BP_TOUCH_CLOCK(node,i);
+        }
+        for (int i = 0; i < node->n_children; i++) {
+            BP_TOUCH_CLOCK(node,i);
+            // if we find a partition that is not available,
+            // then a partial fetch is required because
+            // the entire node must be made available
             if (BP_STATE(node,i) != PT_AVAIL) {
                 retval = TRUE;
                 break;
@@ -618,6 +682,7 @@ BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs) {
             node,
             bfe->search
             );
+        BP_TOUCH_CLOCK(node,bfe->child_to_read);
         retval = (BP_STATE(node,bfe->child_to_read) != PT_AVAIL);
     }
     else {
@@ -629,23 +694,31 @@ BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs) {
 // callback for partially reading a node
 // could have just used toku_brtnode_fetch_callback, but wanted to separate the two cases into separate functions
-int toku_brtnode_pf_callback(void* brtnode_pv, void* read_extraargs, long* sizep) {
+int toku_brtnode_pf_callback(void* brtnode_pv, void* read_extraargs, int fd, long* sizep) {
     BRTNODE node = brtnode_pv;
     struct brtnode_fetch_extra *bfe = (struct brtnode_fetch_extra *)read_extraargs;
     // there must be a reason this is being called. If we get a garbage type or the type is brtnode_fetch_none,
     // then something went wrong
     assert((bfe->type == brtnode_fetch_subset) || (bfe->type == brtnode_fetch_all));
+    // TODO: possibly cilkify expensive operations in this loop
+    // TODO: review this with others to see if it can be made faster
     for (int i = 0; i < node->n_children; i++) {
         if (BP_STATE(node,i) == PT_AVAIL) {
             continue;
         }
         if (toku_brtnode_partition_state(bfe, i) == PT_AVAIL) {
-            assert(BP_STATE(node,i) == PT_COMPRESSED);
+            if (BP_STATE(node,i) == PT_COMPRESSED) {
                 //
                 // decompress the subblock
                 //
                 toku_deserialize_bp_from_compressed(node, i);
+            }
+            else if (BP_STATE(node,i) == PT_ON_DISK) {
+                toku_deserialize_bp_from_disk(node, i, fd, bfe);
+            }
+            else {
+                assert(FALSE);
+            }
         }
     }
     *sizep = brtnode_memory_size(node);
@@ -695,16 +768,6 @@ brt_compare_pivot(BRT brt, const DBT *key, bytevec ck)
     return cmp;
 }

-static void
-destroy_basement_node (BASEMENTNODE bn)
-{
-    // The buffer may have been freed already, in some cases.
-    if (bn->buffer) {
-        toku_omt_destroy(&bn->buffer);
-        bn->buffer = NULL;
-    }
-}
-
 // destroys the internals of the brtnode, but it does not free the values
 // that are stored
 // this is common functionality for toku_brtnode_free and rebalance_brtnode_leaf
@@ -852,6 +915,7 @@ initialize_empty_brtnode (BRT t, BRTNODE n, BLOCKNUM nodename, int height, int n
         BP_STATE(n,i) = PT_INVALID;
         BP_OFFSET(n,i) = 0;
         BP_SUBTREE_EST(n,i) = zero_estimates;
+        BP_INIT_TOUCHED_CLOCK(n, i);
         n->bp[i].ptr = NULL;
         if (height > 0) {
             n->bp[i].ptr = toku_malloc(sizeof(struct brtnode_nonleaf_childinfo));
@@ -1167,6 +1231,7 @@ brtleaf_split (BRT t, BRTNODE node, BRTNODE *nodea, BRTNODE *nodeb, DBT *splitk,
     //
     // handle the move of a subset of data in split_node from node to B
     BP_STATE(B,0) = PT_AVAIL;
     struct subtree_estimates se_diff = zero_estimates;
     u_int32_t diff_size = 0;
......
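The rewritten toku_brtnode_pe_callback above reports the difference between the node's memory footprint before and after the pass, rather than accounting per partition. The per-partition policy is: compressed data is always dropped back to PT_ON_DISK; an available basement node is evicted only if its clock bit is clear, and otherwise the bit is swept so the partition becomes a candidate on the next pass. A compact sketch of that decision under stand-in types:

// sketch of the toku_brtnode_pe_callback policy; pt_state and bp are
// stand-ins for the real TokuDB types
#include <assert.h>
#include <stdio.h>

enum pt_state { PT_ON_DISK, PT_COMPRESSED, PT_AVAIL };
struct bp { enum pt_state state; int clock_count; };

// returns 1 if the partition's memory would be released
static int pe_one_partition(struct bp *p) {
    switch (p->state) {
    case PT_COMPRESSED:            // compressed copies are always dropped
        p->state = PT_ON_DISK;
        return 1;
    case PT_AVAIL:
        if (p->clock_count == 0) { // BP_SHOULD_EVICT: cold since the last sweep
            p->state = PT_ON_DISK;
            return 1;
        }
        p->clock_count = 0;        // BP_SWEEP_CLOCK: gets a second chance
        return 0;
    case PT_ON_DISK:               // nothing in memory to free
        return 0;
    }
    return 0;
}

int main(void) {
    struct bp hot = { PT_AVAIL, 1 };
    assert(pe_one_partition(&hot) == 0 && hot.clock_count == 0); // swept, kept
    assert(pe_one_partition(&hot) == 1 && hot.state == PT_ON_DISK); // evicted on the next pass
    printf("ok\n");
    return 0;
}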
@@ -1210,7 +1210,7 @@ static void flush_dirty_pair(CACHETABLE ct, PAIR p) {
     assert(p->dirty);
     // must check for readers before trying to grab write lock because this thread
     // may be the thread that already grabbed a read lock via get_and_pin
-    if (!rwlock_readers(&p->rwlock)) {
+    if (!rwlock_users(&p->rwlock)) {
         rwlock_write_lock(&p->rwlock, ct->mutex);
         p->state = CTPAIR_WRITING;
         WORKITEM wi = &p->asyncwork;
@@ -1225,7 +1225,7 @@ static void try_evict_pair(CACHETABLE ct, PAIR p, BOOL* is_attainable) {
     // must check for users before we grab the write lock because we may
     // be trying to evict something this thread is trying to read
-    if (!rwlock_readers(&p->rwlock)) {
+    if (!rwlock_users(&p->rwlock)) {
         rwlock_write_lock(&p->rwlock, ct->mutex);
         p->state = CTPAIR_WRITING;
         cachetable_write_pair(ct, p, TRUE);
@@ -1270,7 +1270,7 @@ static int maybe_flush_some (CACHETABLE ct, long size) {
     }
     if (curr_in_clock->count > 0) {
         // TODO: (Zardosht), this is where the callback function for node level eviction will happen
-        if (curr_in_clock->state == CTPAIR_IDLE && !rwlock_readers(&curr_in_clock->rwlock)) {
+        if (curr_in_clock->state == CTPAIR_IDLE && !rwlock_users(&curr_in_clock->rwlock)) {
             curr_in_clock->count--;
             // call the partial eviction callback
             rwlock_write_lock(&curr_in_clock->rwlock, ct->mutex);
@@ -1533,12 +1533,18 @@ int toku_cachetable_get_and_pin (
     // to be decompressed. So, we check to see if a partial fetch is required
     //
     get_and_pin_footprint = 7;
+    rwlock_read_lock(&p->rwlock, ct->mutex);
+    if (do_wait_time)
+        cachetable_waittime += get_tnow() - t0;
     BOOL partial_fetch_required = pf_req_callback(p->value,read_extraargs);
     //
     // in this case, a partial fetch is required so we must grab the PAIR's write lock
     // and then call a callback to retrieve what we need
     //
     if (partial_fetch_required) {
+        rwlock_read_unlock(&p->rwlock);
         rwlock_write_lock(&p->rwlock, ct->mutex);
         if (do_wait_time) {
             cachetable_waittime += get_tnow() - t0;
@@ -1546,17 +1552,20 @@ int toku_cachetable_get_and_pin (
         t0 = get_tnow();
         long old_size = p->size;
         long size = 0;
-        int r = pf_callback(p->value, read_extraargs, &size);
+        rwlock_prefer_read_lock(&cachefile->fdlock, ct->mutex);
+        cachetable_unlock(ct);
+        int r = pf_callback(p->value, read_extraargs, cachefile->fd, &size);
+        cachetable_lock(ct);
+        rwlock_read_unlock(&cachefile->fdlock);
         p->size = size;
         ct->size_current += size;
         ct->size_current -= old_size;
         lazy_assert_zero(r);
         cachetable_waittime += get_tnow() - t0;
         rwlock_write_unlock(&p->rwlock);
+        rwlock_read_lock(&p->rwlock, ct->mutex);
     }
-    rwlock_read_lock(&p->rwlock, ct->mutex);
-    if (do_wait_time)
-        cachetable_waittime += get_tnow() - t0;
     get_and_pin_footprint = 8;
     if (p->state == CTPAIR_INVALID) {
         get_and_pin_footprint = 9;
@@ -1810,22 +1819,26 @@ int toku_cachetable_get_and_pin_nonblocking (
         return TOKUDB_TRY_AGAIN;
     case CTPAIR_IDLE:
         {
+            rwlock_read_lock(&p->rwlock, ct->mutex);
             BOOL partial_fetch_required = pf_req_callback(p->value,read_extraargs);
+            rwlock_read_unlock(&p->rwlock);
             //
             // in this case, a partial fetch is required so we must grab the PAIR's write lock
             // and then call a callback to retrieve what we need
             //
             if (partial_fetch_required) {
+                rwlock_write_lock(&p->rwlock, ct->mutex);
                 run_unlockers(unlockers); // The contract says the unlockers are run with the ct lock being held.
                 if (ct->ydb_unlock_callback) ct->ydb_unlock_callback();
                 // Now wait for the I/O to occur.
-                rwlock_write_lock(&p->rwlock, ct->mutex);
+                rwlock_prefer_read_lock(&cf->fdlock, ct->mutex);
                 cachetable_unlock(ct);
                 long old_size = p->size;
                 long size = 0;
-                int r = pf_callback(p->value, read_extraargs, &size);
+                int r = pf_callback(p->value, read_extraargs, cf->fd, &size);
                 lazy_assert_zero(r);
                 cachetable_lock(ct);
+                rwlock_read_unlock(&cf->fdlock);
                 p->size = size;
                 ct->size_current += size;
                 ct->size_current -= old_size;
......
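Both get_and_pin paths now follow the same discipline around pf_callback: take the PAIR's write lock and a read lock on the cachefile's fdlock so the descriptor stays valid, drop the global cachetable lock for the duration of the disk read, then reacquire it before updating ct->size_current. A simplified sketch of that pattern using plain pthreads rather than the cachetable's own rwlock; the names and structure here are assumptions, not the real code:

// simplified sketch: drop the global lock during partial-fetch I/O while the
// per-PAIR write lock keeps other threads off the node being filled
#include <pthread.h>

struct pair { pthread_rwlock_t rwlock; long size; };

static pthread_mutex_t ct_mutex = PTHREAD_MUTEX_INITIALIZER; // the "cachetable lock"

typedef int (*partial_fetch_cb)(void *value, void *extra, int fd, long *sizep);

// caller holds ct_mutex, as toku_cachetable_get_and_pin does
static int partial_fetch(struct pair *p, void *value, void *extra, int fd,
                         partial_fetch_cb pf_callback) {
    pthread_rwlock_wrlock(&p->rwlock);   // exclusive access to this PAIR
    pthread_mutex_unlock(&ct_mutex);     // never hold the global lock across I/O
    long size = 0;
    int r = pf_callback(value, extra, fd, &size);
    pthread_mutex_lock(&ct_mutex);       // reacquire before touching shared state
    p->size = size;                      // ct->size_current would be adjusted here
    pthread_rwlock_unlock(&p->rwlock);
    return r;
}

static int fake_pf(void *v, void *e, int fd, long *sizep) {
    (void)v; (void)e; (void)fd;
    *sizep = 4096;                       // pretend we read a 4KiB partition
    return 0;
}

int main(void) {
    struct pair p;
    pthread_rwlock_init(&p.rwlock, NULL);
    p.size = 0;
    pthread_mutex_lock(&ct_mutex);       // enter holding the global lock
    int r = partial_fetch(&p, (void*)0, (void*)0, -1, fake_pf);
    pthread_mutex_unlock(&ct_mutex);
    return r;
}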
@@ -135,7 +135,7 @@ typedef BOOL (*CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK)(void *brtnode_pv, voi
 // The partial fetch callback is called when a thread needs to read a subset of a PAIR into memory
 // Returns: 0 if success, otherwise an error number.
 // The number of bytes added is returned in sizep
-typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *brtnode_pv, void *read_extraargs, long *sizep);
+typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *brtnode_pv, void *read_extraargs, int fd, long *sizep);
 void toku_cachefile_set_userdata(CACHEFILE cf, void *userdata,
                                  int (*log_fassociate_during_checkpoint)(CACHEFILE, void*),
......
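This widened callback type is why every pf_callback in the tree gains an fd argument: the cachetable now hands the callback the cachefile's descriptor so the callback can read the missing bytes itself. A hypothetical implementation shape; the buffer handling and the 4096-byte read at offset 0 are invented for illustration:

// hypothetical pf_callback: with fd supplied, the callback can pread() the
// missing region directly
#include <errno.h>
#include <unistd.h>

typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *brtnode_pv, void *read_extraargs, int fd, long *sizep);

static int my_pf_callback(void *brtnode_pv, void *read_extraargs, int fd, long *sizep) {
    (void)read_extraargs;
    char *buf = brtnode_pv;              // assume the pv is the destination buffer
    ssize_t n = pread(fd, buf, 4096, 0); // fetch the absent bytes
    if (n < 0) return errno;             // an error number, per the contract above
    *sizep = (long)n;                    // bytes now resident, for ct accounting
    return 0;
}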
@@ -527,9 +527,10 @@ static BOOL toku_rollback_pf_req_callback(void* UU(brtnode_pv), void* UU(read_ex
     return FALSE;
 }

-static int toku_rollback_pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int toku_rollback_pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     // should never be called, given that toku_rollback_pf_req_callback always returns false
     assert(FALSE);
+    return 0;
 }
......
@@ -80,8 +80,66 @@ static int check_leafentries(OMTVALUE v, u_int32_t UU(i), void *extra) {
     return 0;
 }

+enum brtnode_verify_type {
+    read_all=1,
+    read_compressed,
+    read_none
+};
+
+static void
+setup_dn(enum brtnode_verify_type bft, int fd, struct brt_header *brt_h, BRTNODE *dn) {
+    int r;
+    if (bft == read_all) {
+        struct brtnode_fetch_extra bfe;
+        fill_bfe_for_full_read(&bfe, brt_h);
+        r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe);
+        assert(r==0);
+    }
+    else if (bft == read_compressed || bft == read_none) {
+        struct brtnode_fetch_extra bfe;
+        fill_bfe_for_min_read(&bfe, brt_h);
+        r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe);
+        assert(r==0);
+        // assert all bp's are compressed
+        for (int i = 0; i < (*dn)->n_children; i++) {
+            assert(BP_STATE(*dn,i) == PT_COMPRESSED);
+        }
+        // if read_none, get rid of the compressed bp's
+        if (bft == read_none) {
+            long bytes_freed = 0;
+            toku_brtnode_pe_callback(*dn, 0xffffffff, &bytes_freed, NULL);
+            // assert all bp's are on disk
+            for (int i = 0; i < (*dn)->n_children; i++) {
+                if ((*dn)->height == 0) {
+                    assert(BP_STATE(*dn,i) == PT_ON_DISK);
+                    assert((*dn)->bp[i].ptr == NULL);
+                }
+                else {
+                    assert(BP_STATE(*dn,i) == PT_COMPRESSED);
+                    assert((*dn)->bp[i].ptr != NULL);
+                }
+            }
+        }
+        // now decompress them
+        fill_bfe_for_full_read(&bfe, brt_h);
+        assert(toku_brtnode_pf_req_callback(*dn, &bfe));
+        long size;
+        r = toku_brtnode_pf_callback(*dn, &bfe, fd, &size);
+        assert(r==0);
+        // assert all bp's are available
+        for (int i = 0; i < (*dn)->n_children; i++) {
+            assert(BP_STATE(*dn,i) == PT_AVAIL);
+        }
+        // continue on with the test
+    }
+    else {
+        // if we get here, this is a test bug, NOT a bug in development code
+        assert(FALSE);
+    }
+}
+
 static void
-test_serialize_leaf_with_large_pivots(void) {
+test_serialize_leaf_with_large_pivots(enum brtnode_verify_type bft) {
     int r;
     struct brtnode sn, *dn;
     const int keylens = 256*1024, vallens = 0, nrows = 8;
@@ -160,11 +218,8 @@ test_serialize_leaf_with_large_pivots(void) {
     r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
     assert(r==0);

-    struct brtnode_fetch_extra bfe;
-    fill_bfe_for_full_read(&bfe, brt_h);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
-    assert(r==0);
+    setup_dn(bft, fd, brt_h, &dn);

     assert(dn->thisnodename.b==20);
     assert(dn->layout_version ==BRT_LAYOUT_VERSION);
@@ -211,7 +266,7 @@ test_serialize_leaf_with_large_pivots(void) {
 }

 static void
-test_serialize_leaf_with_many_rows(void) {
+test_serialize_leaf_with_many_rows(enum brtnode_verify_type bft) {
     int r;
     struct brtnode sn, *dn;
     const int keylens = sizeof(int), vallens = sizeof(int), nrows = 196*1024;
@@ -283,10 +338,7 @@ test_serialize_leaf_with_many_rows(void) {
     r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
     assert(r==0);

-    struct brtnode_fetch_extra bfe;
-    fill_bfe_for_full_read(&bfe, brt_h);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
-    assert(r==0);
+    setup_dn(bft, fd, brt_h, &dn);

     assert(dn->thisnodename.b==20);
@@ -335,7 +387,7 @@ test_serialize_leaf_with_many_rows(void) {
 }

 static void
-test_serialize_leaf_with_large_rows(void) {
+test_serialize_leaf_with_large_rows(enum brtnode_verify_type bft) {
     int r;
     struct brtnode sn, *dn;
     const size_t val_size = 512*1024;
@@ -412,10 +464,7 @@ test_serialize_leaf_with_large_rows(void) {
     r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
     assert(r==0);

-    struct brtnode_fetch_extra bfe;
-    fill_bfe_for_full_read(&bfe, brt_h);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
-    assert(r==0);
+    setup_dn(bft, fd, brt_h, &dn);

     assert(dn->thisnodename.b==20);
@@ -464,7 +513,7 @@ test_serialize_leaf_with_large_rows(void) {
 }

 static void
-test_serialize_leaf_with_empty_basement_nodes(void) {
+test_serialize_leaf_with_empty_basement_nodes(enum brtnode_verify_type bft) {
     const int nodesize = 1024;
     struct brtnode sn, *dn;
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE); r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
assert(r==0); assert(r==0);
struct brtnode_fetch_extra bfe; setup_dn(bft, fd, brt_h, &dn);
fill_bfe_for_full_read(&bfe, brt_h);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
assert(r==0);
assert(dn->thisnodename.b==20); assert(dn->thisnodename.b==20);
@@ -597,7 +643,7 @@ test_serialize_leaf_with_empty_basement_nodes(void) {
 }

 static void
-test_serialize_leaf(void) {
+test_serialize_leaf(enum brtnode_verify_type bft) {
     // struct brt source_brt;
     const int nodesize = 1024;
     struct brtnode sn, *dn;
@@ -675,10 +721,7 @@ test_serialize_leaf(void) {
     r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
     assert(r==0);

-    struct brtnode_fetch_extra bfe;
-    fill_bfe_for_full_read(&bfe, brt_h);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
-    assert(r==0);
+    setup_dn(bft, fd, brt_h, &dn);

     assert(dn->thisnodename.b==20);
@@ -731,7 +774,7 @@ test_serialize_leaf(void) {
 }

 static void
-test_serialize_nonleaf(void) {
+test_serialize_nonleaf(enum brtnode_verify_type bft) {
     // struct brt source_brt;
     const int nodesize = 1024;
     struct brtnode sn, *dn;
@@ -823,11 +866,7 @@ test_serialize_nonleaf(void) {
     assert(sn.max_msn_applied_to_node_on_disk.msn == TESTMSNMEMVAL);
     assert(sn.max_msn_applied_to_node_in_memory.msn == TESTMSNMEMVAL);

-    struct brtnode_fetch_extra bfe;
-    fill_bfe_for_full_read(&bfe, brt_h);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
-    assert(r==0);
+    setup_dn(bft, fd, brt_h, &dn);

     assert(dn->thisnodename.b==20);
     assert(dn->max_msn_applied_to_node_on_disk.msn == TESTMSNMEMVAL);
@@ -865,11 +904,29 @@ test_serialize_nonleaf(void) {
 int
 test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute__((__unused__))) {
     toku_memory_check = 1;
-    test_serialize_leaf();
-    test_serialize_leaf_with_empty_basement_nodes();
-    test_serialize_leaf_with_large_rows();
-    test_serialize_leaf_with_many_rows();
-    test_serialize_leaf_with_large_pivots();
-    test_serialize_nonleaf();
+    test_serialize_leaf(read_none);
+    test_serialize_leaf(read_all);
+    test_serialize_leaf(read_compressed);
+
+    test_serialize_leaf_with_empty_basement_nodes(read_none);
+    test_serialize_leaf_with_empty_basement_nodes(read_all);
+    test_serialize_leaf_with_empty_basement_nodes(read_compressed);
+
+    test_serialize_leaf_with_large_rows(read_none);
+    test_serialize_leaf_with_large_rows(read_all);
+    test_serialize_leaf_with_large_rows(read_compressed);
+
+    test_serialize_leaf_with_many_rows(read_none);
+    test_serialize_leaf_with_many_rows(read_all);
+    test_serialize_leaf_with_many_rows(read_compressed);
+
+    test_serialize_leaf_with_large_pivots(read_none);
+    test_serialize_leaf_with_large_pivots(read_all);
+    test_serialize_leaf_with_large_pivots(read_compressed);
+
+    test_serialize_nonleaf(read_none);
+    test_serialize_nonleaf(read_all);
+    test_serialize_nonleaf(read_compressed);
     return 0;
 }
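Each serializer test now takes a verify mode, and test_main exercises all three fetch paths per test. The same coverage could be expressed with a small table-driven helper, sketched below; this is a hypothetical refactoring, not part of the patch:

// hypothetical driver: run one serializer test under every verify mode
#include <stddef.h>

enum brtnode_verify_type { read_all = 1, read_compressed, read_none }; // as in the test above

typedef void (*serialize_test_fn)(enum brtnode_verify_type);

static void run_all_modes(serialize_test_fn test) {
    const enum brtnode_verify_type modes[] = { read_none, read_all, read_compressed };
    for (size_t i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
        test(modes[i]);
    }
}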
@@ -51,7 +51,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -69,7 +69,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -59,7 +59,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -85,7 +85,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -53,7 +53,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -48,7 +48,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -57,7 +57,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -58,7 +58,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -57,7 +57,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -69,7 +69,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -53,7 +53,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -54,7 +54,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -53,7 +53,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -57,7 +57,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -95,7 +95,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -60,7 +60,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -49,7 +49,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -171,7 +171,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -134,7 +134,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......
@@ -44,7 +44,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
     return FALSE;
 }
-static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), long* UU(sizep)) {
+static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), long* UU(sizep)) {
     assert(FALSE);
 }
......