Commit 7942bcf2 authored by Zardosht Kasheff, committed by Yoni Fogel

[t:4028], merge to main

git-svn-id: file:///svn/toku/tokudb@41142 c7de825b-a66e-492c-adef-691d508d4ae1
parent 06d75f5e
@@ -127,6 +127,7 @@ toku_pin_brtnode(
     ANCESTORS ancestors,
     const PIVOT_BOUNDS bounds,
     BRTNODE_FETCH_EXTRA bfe,
+    BOOL may_modify_node,
     BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
     BRTNODE *node_p,
     BOOL* msgs_applied)
@@ -143,6 +144,7 @@ toku_pin_brtnode(
         toku_brtnode_fetch_callback,
         toku_brtnode_pf_req_callback,
         toku_brtnode_pf_callback,
+        may_modify_node,
         bfe, //read_extraargs
         unlockers);
     if (r==0) {
@@ -168,6 +170,7 @@ toku_pin_brtnode_holding_lock(
     const PIVOT_BOUNDS bounds,
     BRTNODE_FETCH_EXTRA bfe,
     BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
+    BOOL may_modify_node,
     BRTNODE *node_p)
 {
     void *node_v;
@@ -181,6 +184,7 @@ toku_pin_brtnode_holding_lock(
         toku_brtnode_fetch_callback,
         toku_brtnode_pf_req_callback,
         toku_brtnode_pf_callback,
+        may_modify_node,
         bfe
         );
     assert(r==0);
@@ -196,6 +200,7 @@ toku_pin_brtnode_off_client_thread(
     BLOCKNUM blocknum,
     u_int32_t fullhash,
     BRTNODE_FETCH_EXTRA bfe,
+    BOOL may_modify_node,
     u_int32_t num_dependent_nodes,
     BRTNODE* dependent_nodes,
     BRTNODE *node_p)
@@ -222,6 +227,7 @@ toku_pin_brtnode_off_client_thread(
         toku_brtnode_fetch_callback,
         toku_brtnode_pf_req_callback,
         toku_brtnode_pf_callback,
+        may_modify_node,
         bfe,
         num_dependent_nodes,
         dependent_cf,
......
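Review note: every brtnode pin now states up front whether the caller might dirty the node, and the flag travels untouched down to the cachetable (see the cachetable.h hunks below, where it surfaces as may_modify_value). A hedged sketch of the resulting call pattern, built only from the signatures in this commit; `h`, `blocknum`, and `fullhash` stand in for whatever the caller has on hand:

    #include "brt-internal.h" // toku_pin_brtnode_off_client_thread, fill_bfe_for_full_read

    // Hedged sketch, not code from this commit. A writer declares its intent
    // with TRUE; a pure reader could pass FALSE, which presumably lets a
    // concurrent checkpoint clone the pair (t:4028) instead of blocking on it.
    static void pin_child_for_update(struct brt_header *h, BLOCKNUM blocknum, u_int32_t fullhash) {
        struct brtnode_fetch_extra bfe;
        BRTNODE node;
        fill_bfe_for_full_read(&bfe, h);
        toku_pin_brtnode_off_client_thread(
            h, blocknum, fullhash, &bfe,
            TRUE,     // may_modify_node: this caller intends to dirty the node
            0, NULL,  // no dependent nodes
            &node);
        // ... mutate node, then unpin through the usual unpin path ...
    }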
@@ -71,6 +71,7 @@ toku_pin_brtnode(
     ANCESTORS ancestors,
     const PIVOT_BOUNDS pbounds,
     BRTNODE_FETCH_EXTRA bfe,
+    BOOL may_modify_node,
     BOOL apply_ancestor_messages, // this BOOL is probably temporary, for #3972, once we know how range query estimates work, will revisit this
     BRTNODE *node_p,
     BOOL* msgs_applied
@@ -88,6 +89,7 @@ toku_pin_brtnode_holding_lock(
     const PIVOT_BOUNDS pbounds,
     BRTNODE_FETCH_EXTRA bfe,
     BOOL apply_ancestor_messages,
+    BOOL may_modify_node,
     BRTNODE *node_p
     );
@@ -104,6 +106,7 @@ toku_pin_brtnode_off_client_thread(
     BLOCKNUM blocknum,
     u_int32_t fullhash,
     BRTNODE_FETCH_EXTRA bfe,
+    BOOL may_modify_node,
     u_int32_t num_dependent_nodes,
     BRTNODE* dependent_nodes,
     BRTNODE *node_p
......
@@ -400,7 +400,7 @@ ct_maybe_merge_child(struct flusher_advice *fa,
         CACHEKEY *rootp = toku_calculate_root_offset_pointer(h, &fullhash);
         struct brtnode_fetch_extra bfe;
         fill_bfe_for_full_read(&bfe, h);
-        toku_pin_brtnode_off_client_thread(h, *rootp, fullhash, &bfe, 0,NULL, &root_node);
+        toku_pin_brtnode_off_client_thread(h, *rootp, fullhash, &bfe, TRUE, 0, NULL, &root_node);
         toku_assert_entire_node_in_memory(root_node);
         toku_brtheader_release_treelock(h);
@@ -512,8 +512,6 @@ handle_split_of_child(
     BP_BLOCKNUM(node, childnum+1) = childb->thisnodename;
     BP_WORKDONE(node, childnum+1) = 0;
     BP_STATE(node,childnum+1) = PT_AVAIL;
-    BP_START(node,childnum+1) = 0;
-    BP_SIZE(node,childnum+1) = 0;
     set_BNC(node, childnum+1, toku_create_empty_nl());
@@ -824,8 +822,6 @@ brtleaf_split(
     for (int i = 0; i < num_children_in_b; i++) {
         BP_BLOCKNUM(B,i).b = 0;
         BP_STATE(B,i) = PT_AVAIL;
-        BP_START(B,i) = 0;
-        BP_SIZE(B,i) = 0;
         BP_WORKDONE(B,i) = 0;
         set_BLB(B, i, toku_create_empty_bn());
     }
@@ -1361,7 +1357,7 @@ brt_merge_child(
         u_int32_t childfullhash = compute_child_fullhash(h->cf, node, childnuma);
         struct brtnode_fetch_extra bfe;
         fill_bfe_for_full_read(&bfe, h);
-        toku_pin_brtnode_off_client_thread(h, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, 1, &node, &childa);
+        toku_pin_brtnode_off_client_thread(h, BP_BLOCKNUM(node, childnuma), childfullhash, &bfe, TRUE, 1, &node, &childa);
     }
     // for test
     call_flusher_thread_callback(ft_flush_before_pin_second_node_for_merge);
@@ -1372,7 +1368,7 @@ brt_merge_child(
         u_int32_t childfullhash = compute_child_fullhash(h->cf, node, childnumb);
         struct brtnode_fetch_extra bfe;
         fill_bfe_for_full_read(&bfe, h);
-        toku_pin_brtnode_off_client_thread(h, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, 2, dep_nodes, &childb);
+        toku_pin_brtnode_off_client_thread(h, BP_BLOCKNUM(node, childnumb), childfullhash, &bfe, TRUE, 2, dep_nodes, &childb);
     }
     if (toku_bnc_n_entries(BNC(node,childnuma))>0) {
@@ -1498,7 +1494,7 @@ flush_some_child(
     // Note that we don't read the entire node into memory yet.
     // The idea is let's try to do the minimum work before releasing the parent lock
     fill_bfe_for_min_read(&bfe, h);
-    toku_pin_brtnode_off_client_thread(h, targetchild, childfullhash, &bfe, 1, &parent, &child);
+    toku_pin_brtnode_off_client_thread(h, targetchild, childfullhash, &bfe, TRUE, 1, &parent, &child);
     // for test
     call_flusher_thread_callback(ft_flush_after_child_pin);
......
@@ -280,6 +280,7 @@ toku_brt_hot_optimize(BRT brt,
         (BLOCKNUM) *rootp,
         fullhash,
         &bfe,
+        TRUE,
         0,
         NULL,
         &root);
......
@@ -188,6 +188,22 @@ typedef struct __attribute__((__packed__)) brtnode_child_pointer {
     } u;
 } BRTNODE_CHILD_POINTER;
 
+struct brtnode_disk_data {
+    //
+    // stores the offset to the beginning of the partition on disk from the brtnode, and the length, needed to read a partition off of disk
+    // the value is only meaningful if the node is clean. If the node is dirty, then the value is meaningless
+    // The START is the distance from the end of the compressed node_info data, to the beginning of the compressed partition
+    // The SIZE is the size of the compressed partition.
+    // Rationale: We cannot store the size from the beginning of the node since we don't know how big the header will be.
+    // However, later when we are doing aligned writes, we won't be able to store the size from the end since we want things to align.
+    u_int32_t start;
+    u_int32_t size;
+};
+#define BP_START(node_dd,i) ((node_dd)[i].start)
+#define BP_SIZE(node_dd,i) ((node_dd)[i].size)
+
 // a brtnode partition, associated with a child of a node
 struct __attribute__((__packed__)) brtnode_partition {
     // the following three variables are used for nonleaf nodes
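The rationale comment reads more easily next to a worked read path. A hedged sketch of how a reader would locate partition i on disk with the new structure; node_info_end_offset is an assumed name for the file offset at which the compressed node_info data ends:

    // Hedged sketch, not code from this commit: partition i of a clean node
    // occupies [node_info_end_offset + START, node_info_end_offset + START + SIZE).
    static void partition_disk_range(BRTNODE_DISK_DATA ndd, int i,
                                     u_int64_t node_info_end_offset,
                                     u_int64_t *offset, u_int32_t *size) {
        *offset = node_info_end_offset + BP_START(ndd, i); // START: distance from the end of node_info
        *size   = BP_SIZE(ndd, i);                         // SIZE: length of the compressed partition
    }

Pulling start/size out of brtnode_partition into a separate array that the cachetable hands around as disk_data appears to be what lets a clone and its original each keep their own on-disk layout while sharing the in-memory node shape.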
@@ -203,14 +219,6 @@ struct __attribute__((__packed__)) brtnode_partition {
     //
     enum pt_state state; // make this an enum to make debugging easier.
     //
-    // stores the offset to the beginning of the partition on disk from the brtnode, and the length, needed to read a partition off of disk
-    // the value is only meaningful if the node is clean. If the node is dirty, then the value is meaningless
-    // The START is the distance from the end of the compressed node_info data, to the beginning of the compressed partition
-    // The SIZE is the size of the compressed partition.
-    // Rationale: We cannot store the size from the beginning of the node since we don't know how big the header will be.
-    // However, later when we are doing aligned writes, we won't be able to store the size from the end since we want things to align.
-    u_int32_t start,size;
-    //
     // pointer to the partition. Depending on the state, they may be different things
     // if state == PT_INVALID, then the node was just initialized and ptr == NULL
     // if state == PT_ON_DISK, then ptr == NULL
@@ -258,11 +266,7 @@ struct brtnode {
 // brtnode partition macros
 // BP stands for brtnode_partition
 #define BP_BLOCKNUM(node,i) ((node)->bp[i].blocknum)
-#define BP_HAVE_FULLHASH(node,i) ((node)->bp[i].have_fullhash)
-#define BP_FULLHASH(node,i) ((node)->bp[i].fullhash)
 #define BP_STATE(node,i) ((node)->bp[i].state)
-#define BP_START(node,i) ((node)->bp[i].start)
-#define BP_SIZE(node,i) ((node)->bp[i].size)
 #define BP_WORKDONE(node, i)((node)->bp[i].workdone)
 //
@@ -448,18 +452,21 @@ toku_create_compressed_partition_from_available(
     int childnum,
     SUB_BLOCK sb
     );
+void rebalance_brtnode_leaf(BRTNODE node, unsigned int basementnodesize);
 int toku_serialize_brtnode_to_memory (BRTNODE node,
+                                      BRTNODE_DISK_DATA* ndd,
                                       unsigned int basementnodesize,
+                                      BOOL do_rebalancing,
                                       /*out*/ size_t *n_bytes_to_write,
                                       /*out*/ char **bytes_to_write);
-int toku_serialize_brtnode_to(int fd, BLOCKNUM, BRTNODE node, struct brt_header *h, int n_workitems, int n_threads, BOOL for_checkpoint);
+int toku_serialize_brtnode_to(int fd, BLOCKNUM, BRTNODE node, BRTNODE_DISK_DATA* ndd, BOOL do_rebalancing, struct brt_header *h, int n_workitems, int n_threads, BOOL for_checkpoint);
 int toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log,
                                     struct brt_header *h, int n_workitems, int n_threads,
                                     BOOL for_checkpoint);
 int toku_deserialize_rollback_log_from (int fd, BLOCKNUM blocknum, u_int32_t fullhash, ROLLBACK_LOG_NODE *logp, struct brt_header *h);
-void toku_deserialize_bp_from_disk(BRTNODE node, int childnum, int fd, struct brtnode_fetch_extra* bfe);
+void toku_deserialize_bp_from_disk(BRTNODE node, BRTNODE_DISK_DATA ndd, int childnum, int fd, struct brtnode_fetch_extra* bfe);
 void toku_deserialize_bp_from_compressed(BRTNODE node, int childnum, DESCRIPTOR desc, brt_compare_func cmp);
-int toku_deserialize_brtnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, BRTNODE *brtnode, struct brtnode_fetch_extra* bfe);
+int toku_deserialize_brtnode_from (int fd, BLOCKNUM off, u_int32_t /*fullhash*/, BRTNODE *brtnode, BRTNODE_DISK_DATA* ndd, struct brtnode_fetch_extra* bfe);
 unsigned int toku_serialize_brtnode_size(BRTNODE node); /* How much space will it take? */
 int toku_keycompare (bytevec key1, ITEMLEN key1len, bytevec key2, ITEMLEN key2len);
@@ -477,6 +484,8 @@ int toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISK
 void toku_serialize_descriptor_contents_to_wbuf(struct wbuf *wb, const DESCRIPTOR desc);
 BASEMENTNODE toku_create_empty_bn(void);
 BASEMENTNODE toku_create_empty_bn_no_buffer(void); // create a basement node with a null buffer.
+NONLEAF_CHILDINFO toku_clone_nl(NONLEAF_CHILDINFO orig_childinfo);
+BASEMENTNODE toku_clone_bn(BASEMENTNODE orig_bn);
 NONLEAF_CHILDINFO toku_create_empty_nl(void);
 // FIXME needs toku prefix
 void destroy_basement_node (BASEMENTNODE bn);
@@ -529,12 +538,13 @@ struct brtenv {
 };
 
 void toku_brt_status_update_pivot_fetch_reason(struct brtnode_fetch_extra *bfe);
-extern void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *brtnode_v, void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint);
-extern int toku_brtnode_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, u_int32_t fullhash, void **brtnode_pv, PAIR_ATTR *sizep, int*dirty, void*extraargs);
-extern void toku_brtnode_pe_est_callback(void* brtnode_pv, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void* write_extraargs);
+extern void toku_brtnode_clone_callback(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, BOOL for_checkpoint, void* write_extraargs);
+extern void toku_brtnode_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, void *brtnode_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL is_clone);
+extern int toku_brtnode_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM nodename, u_int32_t fullhash, void **brtnode_pv, void** UU(disk_data), PAIR_ATTR *sizep, int*dirty, void*extraargs);
+extern void toku_brtnode_pe_est_callback(void* brtnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void* write_extraargs);
 extern int toku_brtnode_pe_callback (void *brtnode_pv, PAIR_ATTR old_attr, PAIR_ATTR* new_attr, void *extraargs);
 extern BOOL toku_brtnode_pf_req_callback(void* brtnode_pv, void* read_extraargs);
-int toku_brtnode_pf_callback(void* brtnode_pv, void* read_extraargs, int fd, PAIR_ATTR* sizep);
+int toku_brtnode_pf_callback(void* brtnode_pv, void* UU(disk_data), void* read_extraargs, int fd, PAIR_ATTR* sizep);
 extern int toku_brtnode_cleaner_callback( void *brtnode_pv, BLOCKNUM blocknum, u_int32_t fullhash, void *extraargs);
 extern int toku_brt_alloc_init_header(BRT t, TOKUTXN txn);
 extern int toku_read_brt_header_and_store_in_cachefile (BRT brt, CACHEFILE cf, LSN max_acceptable_lsn, struct brt_header **header, BOOL* was_open);
@@ -546,6 +556,7 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_node(struct brt_
     wc.pe_est_callback = toku_brtnode_pe_est_callback;
     wc.pe_callback = toku_brtnode_pe_callback;
     wc.cleaner_callback = toku_brtnode_cleaner_callback;
+    wc.clone_callback = toku_brtnode_clone_callback;
     wc.write_extraargs = h;
     return wc;
 }
@@ -900,6 +911,9 @@ typedef enum {
     BRT_STATUS_NUM_ROWS
 } brt_status_entry;
 
+void brt_begin_checkpoint(void);
+void brt_end_checkpoint(void);
+
 typedef struct {
     bool initialized;
     TOKU_ENGINE_STATUS_ROW_S status[BRT_STATUS_NUM_ROWS];
......
This diff is collapsed.
@@ -98,6 +98,7 @@ int toku_testsetup_get_sersize(BRT brt, BLOCKNUM diskoff) // Return the size on
         toku_brtnode_fetch_callback,
         toku_brtnode_pf_req_callback,
         toku_brtnode_pf_callback,
+        TRUE,
         &bfe
         );
     assert(r==0);
@@ -124,6 +125,7 @@ int toku_testsetup_insert_to_leaf (BRT brt, BLOCKNUM blocknum, char *key, int ke
         toku_brtnode_fetch_callback,
         toku_brtnode_pf_req_callback,
         toku_brtnode_pf_callback,
+        TRUE,
         &bfe
         );
     if (r!=0) return r;
@@ -172,6 +174,7 @@ toku_pin_node_with_min_bfe(BRTNODE* node, BLOCKNUM b, BRT t)
         b,
         toku_cachetable_hash(t->h->cf, b),
         &bfe,
+        TRUE,
         0,
         NULL,
         node
@@ -196,6 +199,7 @@ int toku_testsetup_insert_to_nonleaf (BRT brt, BLOCKNUM blocknum, enum brt_msg_t
         toku_brtnode_fetch_callback,
         toku_brtnode_pf_req_callback,
         toku_brtnode_pf_callback,
+        TRUE,
         &bfe
         );
     if (r!=0) return r;
......
@@ -215,6 +215,7 @@ toku_get_node_for_verify(
         blocknum,
         fullhash,
         &bfe,
+        TRUE, // may_modify_node, safe to set to TRUE
         0,
         NULL,
         nodep
......
This diff is collapsed.
@@ -123,8 +123,9 @@ static void
 dump_node (int f, BLOCKNUM blocknum, struct brt_header *h) {
     BRTNODE n;
     struct brtnode_fetch_extra bfe;
+    BRTNODE_DISK_DATA ndd = NULL;
     fill_bfe_for_full_read(&bfe, h);
-    int r = toku_deserialize_brtnode_from (f, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &bfe);
+    int r = toku_deserialize_brtnode_from (f, blocknum, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
     assert(r==0);
     assert(n!=0);
     printf("brtnode\n");
@@ -207,6 +208,7 @@ dump_node (int f, BLOCKNUM blocknum, struct brt_header *h) {
         }
     }
     toku_brtnode_free(&n);
+    toku_free(ndd);
 }
 
 static void
@@ -226,9 +228,10 @@ static int
 fragmentation_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra) {
     frag_help_extra *info = extra;
     BRTNODE n;
+    BRTNODE_DISK_DATA ndd = NULL;
     struct brtnode_fetch_extra bfe;
     fill_bfe_for_full_read(&bfe, info->h);
-    int r = toku_deserialize_brtnode_from(info->f, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &bfe);
+    int r = toku_deserialize_brtnode_from(info->f, b, 0 /*pass zero for hash, it doesn't matter*/, &n, &ndd, &bfe);
     if (r==0) {
         info->blocksizes += size;
         if (n->height == 0) {
@@ -236,6 +239,7 @@ fragmentation_helper(BLOCKNUM b, int64_t size, int64_t UU(address), void *extra)
             info->leafblocks++;
         }
         toku_brtnode_free(&n);
+        toku_free(ndd);
     }
     return 0;
 }
@@ -282,9 +286,10 @@ static int
 garbage_helper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) {
     garbage_help_extra *info = extra;
     BRTNODE n;
+    BRTNODE_DISK_DATA ndd = NULL;
     struct brtnode_fetch_extra bfe;
     fill_bfe_for_full_read(&bfe, info->h);
-    int r = toku_deserialize_brtnode_from(info->f, b, 0, &n, &bfe);
+    int r = toku_deserialize_brtnode_from(info->f, b, 0, &n, &ndd, &bfe);
     if (r != 0) {
         goto no_node;
     }
@@ -300,6 +305,7 @@ garbage_helper(BLOCKNUM b, int64_t UU(size), int64_t UU(address), void *extra) {
     }
 exit:
     toku_brtnode_free(&n);
+    toku_free(ndd);
 no_node:
     return r;
 }
......
@@ -2806,7 +2806,8 @@ static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progr
     // serialize leaf to buffer
     size_t serialized_leaf_size = 0;
     char *serialized_leaf = NULL;
-    result = toku_serialize_brtnode_to_memory(lbuf->node, target_basementnodesize, &serialized_leaf_size, &serialized_leaf);
+    BRTNODE_DISK_DATA ndd = NULL;
+    result = toku_serialize_brtnode_to_memory(lbuf->node, &ndd, target_basementnodesize, TRUE, &serialized_leaf_size, &serialized_leaf);
 
     // write it out
     if (result == 0) {
@@ -2822,8 +2823,10 @@ static void finish_leafnode (struct dbout *out, struct leaf_buf *lbuf, int progr
     }
 
     // free the node
-    if (serialized_leaf)
+    if (serialized_leaf) {
+        toku_free(ndd);
         toku_free(serialized_leaf);
+    }
     toku_brtnode_free(&lbuf->node);
     xids_destroy(&lbuf->xids);
     toku_free(lbuf);
@@ -3015,11 +3018,12 @@ static void write_nonleaf_node (BRTLOADER bl, struct dbout *out, int64_t blocknu
         BP_STATE(node,i) = PT_AVAIL;
     }
 
+    BRTNODE_DISK_DATA ndd = NULL;
     if (result == 0) {
         size_t n_bytes;
         char *bytes;
         int r;
-        r = toku_serialize_brtnode_to_memory(node, target_basementnodesize, &n_bytes, &bytes);
+        r = toku_serialize_brtnode_to_memory(node, &ndd, target_basementnodesize, TRUE, &n_bytes, &bytes);
         if (r) {
             result = r;
         } else {
@@ -3049,6 +3053,7 @@ static void write_nonleaf_node (BRTLOADER bl, struct dbout *out, int64_t blocknu
     toku_free(node->bp);
     toku_free(node->childkeys);
     toku_free(node);
+    toku_free(ndd);
     toku_free(subtree_info);
 
     blocknum_of_new_node = blocknum_of_new_node;
......
@@ -31,6 +31,7 @@ typedef bool BOOL;
 
 typedef struct brt *BRT;
 typedef struct brtnode *BRTNODE;
+typedef struct brtnode_disk_data *BRTNODE_DISK_DATA;
 typedef struct brtnode_leaf_basement_node *BASEMENTNODE;
 typedef struct brtnode_nonleaf_childinfo *NONLEAF_CHILDINFO;
 typedef struct sub_block *SUB_BLOCK;
......
This diff is collapsed.
@@ -130,14 +130,14 @@ enum cachetable_dirty {
 // When for_checkpoint is true, this was a 'pending' write
 // Returns: 0 if success, otherwise an error number.
 // Can access fd (fd is protected by a readlock during call)
-typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint);
+typedef void (*CACHETABLE_FLUSH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, void *value, void **disk_data, void *write_extraargs, PAIR_ATTR size, PAIR_ATTR* new_size, BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL is_clone);
 
 // The fetch callback is called when a thread is attempting to get and pin a memory
 // object and it is not in the cachetable.
 // Returns: 0 if success, otherwise an error number. The address and size of the object
 // associated with the key are returned.
 // Can access fd (fd is protected by a readlock during call)
-typedef int (*CACHETABLE_FETCH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, u_int32_t fullhash, void **value, PAIR_ATTR *sizep, int *dirtyp, void *read_extraargs);
+typedef int (*CACHETABLE_FETCH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, u_int32_t fullhash, void **value_data, void **disk_data, PAIR_ATTR *sizep, int *dirtyp, void *read_extraargs);
 
 // The cachetable calls the partial eviction estimate callback to determine if
 // partial eviction is a cheap operation that may be called by on the client thread
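The widened flush signature is the heart of the change: the cachetable can now hand the flusher a clone (is_clone == TRUE) that exists only to be written. A hedged skeleton of the contract as this header describes it; the body is illustrative, not the brt implementation:

    // Hedged skeleton matching the new CACHETABLE_FLUSH_CALLBACK shape.
    static void example_flush_callback(CACHEFILE cf, int fd, CACHEKEY key,
                                       void *value, void **disk_data, void *write_extraargs,
                                       PAIR_ATTR size, PAIR_ATTR *new_size,
                                       BOOL write_me, BOOL keep_me, BOOL for_checkpoint,
                                       BOOL is_clone) {
        (void) cf; (void) key; (void) write_extraargs; (void) size; (void) for_checkpoint;
        if (write_me) {
            // serialize `value` to fd, recording the new on-disk layout through
            // *disk_data and the post-write attributes through *new_size
            (void) fd; (void) disk_data; (void) new_size;
        }
        if (!keep_me) {
            // free `value`; when is_clone is TRUE this is the expected path,
            // since the clone existed only so the checkpoint could write it out
            (void) value; (void) is_clone;
        }
    }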
@@ -147,7 +147,7 @@ typedef int (*CACHETABLE_FETCH_CALLBACK)(CACHEFILE, int fd, CACHEKEY key, u_int3
 // to return an estimate of the number of bytes it will free
 // so that the cachetable can estimate how much data is being evicted on background threads.
 // If cost is PE_CHEAP, then the callback does not set bytes_freed_estimate.
-typedef void (*CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK)(void *brtnode_pv, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void *write_extraargs);
+typedef void (*CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK)(void *brtnode_pv, void* disk_data, long* bytes_freed_estimate, enum partial_eviction_cost *cost, void *write_extraargs);
 
 // The cachetable calls the partial eviction callback is to possibly try and partially evict pieces
 // of the PAIR. The callback determines the strategy for what to evict. The callback may choose to free
@@ -173,16 +173,19 @@ typedef BOOL (*CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK)(void *brtnode_pv, voi
 // The new PAIR_ATTR of the PAIR is returned in sizep
 // Can access fd (fd is protected by a readlock during call)
 // Returns: 0 if success, otherwise an error number.
-typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *brtnode_pv, void *read_extraargs, int fd, PAIR_ATTR *sizep);
+typedef int (*CACHETABLE_PARTIAL_FETCH_CALLBACK)(void *value_data, void* disk_data, void *read_extraargs, int fd, PAIR_ATTR *sizep);
 
 // TODO(leif) XXX TODO XXX
 typedef int (*CACHETABLE_CLEANER_CALLBACK)(void *brtnode_pv, BLOCKNUM blocknum, u_int32_t fullhash, void *write_extraargs);
 
+typedef void (*CACHETABLE_CLONE_CALLBACK)(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, BOOL for_checkpoint, void* write_extraargs);
+
 typedef struct {
     CACHETABLE_FLUSH_CALLBACK flush_callback;
     CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK pe_est_callback;
     CACHETABLE_PARTIAL_EVICTION_CALLBACK pe_callback;
     CACHETABLE_CLEANER_CALLBACK cleaner_callback;
+    CACHETABLE_CLONE_CALLBACK clone_callback;
     void* write_extraargs; // parameter for flush_callback, pe_est_callback, pe_callback, and cleaner_callback
 } CACHETABLE_WRITE_CALLBACK;
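And the other half of the pair: the clone callback's whole contract is visible in the typedef: produce an independent copy of the value and report its attributes. A hedged minimal shape (the real toku_brtnode_clone_callback is declared in brt-internal.h above and defined in a file collapsed in this view):

    // Hedged sketch, not the brt implementation.
    static void example_clone_callback(void *value_data, void **cloned_value_data,
                                       PAIR_ATTR *new_attr, BOOL for_checkpoint,
                                       void *write_extraargs) {
        // Deep-copy value_data -- e.g. with helpers like toku_fifo_clone /
        // toku_mempool_clone added below -- so the checkpoint can write the
        // copy while the original stays pinned and writable.
        *cloned_value_data = NULL; // allocate-and-copy goes here
        // Fill *new_attr with the clone's PAIR_ATTR so cachetable accounting stays honest.
        (void) new_attr; (void) for_checkpoint; (void) write_extraargs;
    }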
@@ -262,6 +265,7 @@ int toku_cachetable_get_and_pin_with_dep_pairs (
     CACHETABLE_FETCH_CALLBACK fetch_callback,
     CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
     CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+    BOOL may_modify_value,
     void* read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
     u_int32_t num_dependent_pairs, // number of dependent pairs that we may need to checkpoint
     CACHEFILE* dependent_cfs, // array of cachefiles of dependent pairs
@@ -286,9 +290,20 @@ int toku_cachetable_get_and_pin (
     CACHETABLE_FETCH_CALLBACK fetch_callback,
     CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback,
     CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+    BOOL may_modify_value,
     void* read_extraargs // parameter for fetch_callback, pf_req_callback, and pf_callback
     );
 
+// does partial fetch on a pinned pair
+void toku_cachetable_pf_pinned_pair(
+    void* value,
+    CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback,
+    void* read_extraargs,
+    CACHEFILE cf,
+    CACHEKEY key,
+    u_int32_t fullhash
+    );
+
 struct unlockers {
     BOOL locked;
     void (*f)(void*extra);
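toku_cachetable_pf_pinned_pair gives a caller that already holds a pin a way to fault in more of the pair without a second get_and_pin round trip. A hedged usage sketch against the declaration above; `node`, `h`, `blocknum`, and `fullhash` are assumed to be in scope:

    // Hedged sketch: `node` is already pinned; bfe says which partitions we
    // now want, and the cachetable drives toku_brtnode_pf_callback for us.
    struct brtnode_fetch_extra bfe;
    fill_bfe_for_full_read(&bfe, h);
    toku_cachetable_pf_pinned_pair(
        node,                       // the pinned value
        toku_brtnode_pf_callback,   // partial fetch callback (brt-internal.h)
        &bfe,                       // read_extraargs
        h->cf, blocknum, fullhash); // identifies the pair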
@@ -309,6 +324,7 @@ int toku_cachetable_get_and_pin_nonblocking (
     CACHETABLE_FETCH_CALLBACK fetch_callback,
     CACHETABLE_PARTIAL_FETCH_REQUIRED_CALLBACK pf_req_callback __attribute__((unused)),
     CACHETABLE_PARTIAL_FETCH_CALLBACK pf_callback __attribute__((unused)),
+    BOOL may_modify_value,
     void *read_extraargs, // parameter for fetch_callback, pf_req_callback, and pf_callback
     UNLOCKERS unlockers
     );
......
@@ -310,9 +310,9 @@ toku_checkpoint(CACHETABLE ct, TOKULOGGER logger,
     SET_CHECKPOINT_FOOTPRINT(40);
     if (r==0) {
         if (callback_f)
             callback_f(extra);  // callback is called with checkpoint_safe_lock still held
         r = toku_cachetable_end_checkpoint(ct, logger, ydb_lock, ydb_unlock, callback2_f, extra2);
     }
     SET_CHECKPOINT_FOOTPRINT(50);
     if (r==0 && logger) {
......
@@ -226,3 +226,19 @@ DBT *fill_dbt_for_fifo_entry(DBT *dbt, const struct fifo_entry *entry) {
 const struct fifo_entry *toku_fifo_get_entry(FIFO fifo, long off) {
     return toku_fifo_iterate_internal_get_entry(fifo, off);
 }
+
+void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo) {
+    struct fifo *XMALLOC(new_fifo);
+    assert(new_fifo);
+    new_fifo->n_items_in_fifo = orig_fifo->n_items_in_fifo;
+    new_fifo->memory_start = 0;
+    new_fifo->memory_used = orig_fifo->memory_used - orig_fifo->memory_start;
+    new_fifo->memory_size = new_fifo->memory_used;
+    new_fifo->memory = toku_xmalloc(new_fifo->memory_size);
+    memcpy(
+        new_fifo->memory,
+        orig_fifo->memory + orig_fifo->memory_start,
+        new_fifo->memory_size
+        );
+    *cloned_fifo = new_fifo;
+}
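Worth flagging in review: the clone compacts as it copies. memory_start resets to 0 and only memory_used - memory_start bytes move, so a checkpoint-time copy never drags along the consumed prefix of the original. A hedged usage sketch, assuming the existing fifo.h entry points toku_fifo_n_entries and toku_fifo_free:

    // Hedged sketch, not code from this commit.
    FIFO snapshot;
    toku_fifo_clone(live_fifo, &snapshot);  // live_fifo: an existing FIFO, assumed in scope
    assert(toku_fifo_n_entries(snapshot) == toku_fifo_n_entries(live_fifo));
    // ... serialize `snapshot` for the checkpoint ...
    toku_fifo_free(&snapshot);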
@@ -110,6 +110,8 @@ struct fifo_entry * toku_fifo_iterate_internal_get_entry(FIFO fifo, int off);
 DBT *fill_dbt_for_fifo_entry(DBT *dbt, const struct fifo_entry *entry);
 const struct fifo_entry *toku_fifo_get_entry(FIFO fifo, long off);
 
+void toku_fifo_clone(FIFO orig_fifo, FIFO* cloned_fifo);
+
 #if defined(__cplusplus) || defined(__cilkplusplus)
 };
 #endif
......
@@ -137,3 +137,11 @@ size_t toku_mempool_footprint(struct mempool *mp) {
     size_t rval = toku_memory_footprint(base, touched);
     return rval;
 }
+
+void toku_mempool_clone(struct mempool* orig_mp, struct mempool* new_mp) {
+    new_mp->frag_size = orig_mp->frag_size;
+    new_mp->free_offset = orig_mp->free_offset;
+    new_mp->size = orig_mp->free_offset; // only make the cloned mempool store what is needed
+    new_mp->base = toku_xmalloc(new_mp->size);
+    memcpy(new_mp->base, orig_mp->base, new_mp->size);
+}
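Same trimming idea as toku_fifo_clone: the copy's size is pinned to the original's free_offset, so slack capacity is not duplicated, while frag_size carries over so space accounting still matches. One caveat a caller likely has to handle (hedged; `orig` and `p_orig` are assumed to be in scope): the bytes are memcpy'd, so any interior pointers into the old base must be rebased into the clone:

    // Hedged sketch, not code from this commit.
    struct mempool clone;
    toku_mempool_clone(&orig, &clone);
    // Rebase an interior pointer from the original into the clone:
    void *p_clone = (char *) clone.base + ((char *) p_orig - (char *) orig.base);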
@@ -83,6 +83,8 @@ static inline int toku_mempool_inrange(struct mempool *mp, void *vp, size_t size
 /* get memory footprint */
 size_t toku_mempool_footprint(struct mempool *mp);
 
+void toku_mempool_clone(struct mempool* orig_mp, struct mempool* new_mp);
+
 #if defined(__cplusplus) || defined(__cilkplusplus)
 };
 #endif
......
@@ -492,8 +492,8 @@ toku_rollback_log_free(ROLLBACK_LOG_NODE *log_p) {
 }
 
 static void toku_rollback_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname,
-                                          void *rollback_v, void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size,
-                                          BOOL write_me, BOOL keep_me, BOOL for_checkpoint) {
+                                          void *rollback_v, void** UU(disk_data), void *extraargs, PAIR_ATTR size, PAIR_ATTR* new_size,
+                                          BOOL write_me, BOOL keep_me, BOOL for_checkpoint, BOOL UU(is_clone)) {
     int r;
     ROLLBACK_LOG_NODE log = rollback_v;
     struct brt_header *h = extraargs;
@@ -524,7 +524,7 @@ static void toku_rollback_flush_callback (CACHEFILE cachefile, int fd, BLOCKNUM
 }
 
 static int toku_rollback_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM logname, u_int32_t fullhash,
-                                         void **rollback_pv, PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs) {
+                                         void **rollback_pv, void** UU(disk_data), PAIR_ATTR *sizep, int * UU(dirtyp), void *extraargs) {
     int r;
     struct brt_header *h = extraargs;
     assert(h->cf == cachefile);
@@ -539,6 +539,7 @@ static int toku_rollback_fetch_callback (CACHEFILE cachefile, int fd, BLOCKNUM l
 static void toku_rollback_pe_est_callback(
     void* rollback_v,
+    void* UU(disk_data),
     long* bytes_freed_estimate,
     enum partial_eviction_cost *cost,
     void* UU(write_extraargs)
@@ -565,7 +566,7 @@ static BOOL toku_rollback_pf_req_callback(void* UU(brtnode_pv), void* UU(read_ex
     return FALSE;
 }
 
-static int toku_rollback_pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
+static int toku_rollback_pf_callback(void* UU(brtnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* UU(sizep)) {
     // should never be called, given that toku_rollback_pf_req_callback always returns false
     assert(FALSE);
     return 0;
@@ -588,6 +589,7 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(str
     wc.pe_est_callback = toku_rollback_pe_est_callback;
     wc.pe_callback = toku_rollback_pe_callback;
     wc.cleaner_callback = toku_rollback_cleaner_callback;
+    wc.clone_callback = NULL;
     wc.write_extraargs = h;
     return wc;
 }
@@ -873,6 +875,7 @@ int toku_get_and_pin_rollback_log(TOKUTXN txn, TXNID xid, uint64_t sequence, BLO
         toku_rollback_fetch_callback,
         toku_rollback_pf_req_callback,
         toku_rollback_pf_callback,
+        TRUE, // may_modify_value
         h
         );
     assert(r==0);
......
@@ -36,24 +36,26 @@ test_prefetch_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     // disable_prefetching to TRUE
     cursor->disable_prefetching = TRUE;
     fill_bfe_for_prefetch(&bfe, brt_h, cursor);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    BRTNODE_DISK_DATA ndd = NULL;
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
     destroy_bfe_for_prefetch(&bfe);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     // now enable prefetching again
     cursor->disable_prefetching = FALSE;
     fill_bfe_for_prefetch(&bfe, brt_h, cursor);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_AVAIL);
@@ -63,18 +65,19 @@ test_prefetch_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_COMPRESSED);
     assert(BP_STATE(dn,1) == PT_COMPRESSED);
     assert(BP_STATE(dn,2) == PT_COMPRESSED);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_AVAIL);
     assert(BP_STATE(dn,1) == PT_AVAIL);
     assert(BP_STATE(dn,2) == PT_AVAIL);
     destroy_bfe_for_prefetch(&bfe);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     u_int64_t left_key = 150;
     toku_fill_dbt(&cursor->range_lock_left_key, &left_key, sizeof(u_int64_t));
     cursor->left_is_neg_infty = FALSE;
     fill_bfe_for_prefetch(&bfe, brt_h, cursor);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
@@ -84,18 +87,19 @@ test_prefetch_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_COMPRESSED);
     assert(BP_STATE(dn,2) == PT_COMPRESSED);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_AVAIL);
     assert(BP_STATE(dn,2) == PT_AVAIL);
     destroy_bfe_for_prefetch(&bfe);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     u_int64_t right_key = 151;
     toku_fill_dbt(&cursor->range_lock_right_key, &right_key, sizeof(u_int64_t));
     cursor->right_is_pos_infty = FALSE;
     fill_bfe_for_prefetch(&bfe, brt_h, cursor);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
@@ -105,17 +109,18 @@ test_prefetch_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_COMPRESSED);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_AVAIL);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
     destroy_bfe_for_prefetch(&bfe);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     left_key = 100000;
     right_key = 100000;
     fill_bfe_for_prefetch(&bfe, brt_h, cursor);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
@@ -125,17 +130,18 @@ test_prefetch_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_COMPRESSED);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_AVAIL);
     destroy_bfe_for_prefetch(&bfe);
+    toku_free(ndd);
     toku_brtnode_free(&dn);
 
     left_key = 100;
     right_key = 100;
     fill_bfe_for_prefetch(&bfe, brt_h, cursor);
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_AVAIL);
@@ -145,12 +151,13 @@ test_prefetch_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_COMPRESSED);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_AVAIL);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
     destroy_bfe_for_prefetch(&bfe);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     toku_free(cursor);
 }
@@ -161,6 +168,7 @@ test_subset_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     brt_h->compare_fun = int64_key_cmp;
     BRT_CURSOR cursor = toku_malloc(sizeof *cursor);
     BRTNODE dn = NULL;
+    BRTNODE_DISK_DATA ndd = NULL;
     PAIR_ATTR attr;
 
     // first test that prefetching everything should work
@@ -191,7 +199,7 @@ test_subset_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     // set disable_prefetching ON
     bfe.child_to_read = 2;
     bfe.disable_prefetching = TRUE;
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
@@ -206,16 +214,17 @@ test_subset_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_COMPRESSED);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_ON_DISK);
     assert(BP_STATE(dn,2) == PT_AVAIL);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     // fake the childnum to read
     bfe.child_to_read = 2;
     bfe.disable_prefetching = FALSE;
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
@@ -230,15 +239,16 @@ test_subset_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_COMPRESSED);
     assert(BP_STATE(dn,2) == PT_COMPRESSED);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_ON_DISK);
     assert(BP_STATE(dn,1) == PT_AVAIL);
     assert(BP_STATE(dn,2) == PT_AVAIL);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     // fake the childnum to read
     bfe.child_to_read = 0;
-    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe);
+    r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
     assert(r==0);
     assert(dn->n_children == 3);
     assert(BP_STATE(dn,0) == PT_AVAIL);
@@ -253,11 +263,12 @@ test_subset_read(int fd, BRT UU(brt), struct brt_header *brt_h) {
     assert(BP_STATE(dn,0) == PT_COMPRESSED);
     assert(BP_STATE(dn,1) == PT_COMPRESSED);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
-    r = toku_brtnode_pf_callback(dn, &bfe, fd, &attr);
+    r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
     assert(BP_STATE(dn,0) == PT_AVAIL);
     assert(BP_STATE(dn,1) == PT_AVAIL);
     assert(BP_STATE(dn,2) == PT_ON_DISK);
     toku_brtnode_free(&dn);
+    toku_free(ndd);
 
     toku_free(cursor);
 }
@@ -345,8 +356,8 @@ test_prefetching(void) {
         assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         assert(size == 100);
     }
-    r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE);
+    BRTNODE_DISK_DATA ndd = NULL;
+    r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->h, 1, 1, FALSE);
     assert(r==0);
 
     test_prefetch_read(fd, brt, brt_h);
...@@ -365,6 +376,7 @@ test_prefetching(void) { ...@@ -365,6 +376,7 @@ test_prefetching(void) {
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&brt_h->blocktable);
toku_free(brt_h); toku_free(brt_h);
toku_free(brt); toku_free(brt);
toku_free(ndd);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
......
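The common thread in the hunks above: deserialization now hands back a BRTNODE_DISK_DATA describing where each partition lives on disk, and the caller threads it through partial fetches and frees it separately from the node. A minimal sketch of that lifecycle, assuming the fd/brt_h/attr setup from the surrounding test harness (includes.h/test.h):
BRTNODE dn = NULL;
BRTNODE_DISK_DATA ndd = NULL;
struct brtnode_fetch_extra bfe;
PAIR_ATTR attr;
fill_bfe_for_full_read(&bfe, brt_h);
// deserialize returns the disk-data mapping alongside the node
int r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd, &bfe);
assert(r == 0);
// partial fetch needs the same mapping to locate partitions on disk
r = toku_brtnode_pf_callback(dn, ndd, &bfe, fd, &attr);
assert(r == 0);
// the node and its disk data have independent lifetimes
toku_brtnode_free(&dn);
toku_free(ndd);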
...@@ -67,7 +67,8 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -67,7 +67,8 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) {
struct brtnode_fetch_extra bfe_all; struct brtnode_fetch_extra bfe_all;
brt_h->compare_fun = string_key_cmp; brt_h->compare_fun = string_key_cmp;
fill_bfe_for_full_read(&bfe_all, brt_h); fill_bfe_for_full_read(&bfe_all, brt_h);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe_all); BRTNODE_DISK_DATA ndd = NULL;
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_all);
BOOL is_leaf = ((*dn)->height == 0); BOOL is_leaf = ((*dn)->height == 0);
assert(r==0); assert(r==0);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
...@@ -93,7 +94,7 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -93,7 +94,7 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) {
PAIR_ATTR size; PAIR_ATTR size;
BOOL req = toku_brtnode_pf_req_callback(*dn, &bfe_all); BOOL req = toku_brtnode_pf_req_callback(*dn, &bfe_all);
assert(req); assert(req);
toku_brtnode_pf_callback(*dn, &bfe_all, fd, &size); toku_brtnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
toku_brtnode_pe_callback(*dn, attr, &attr, NULL); toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
...@@ -111,7 +112,7 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -111,7 +112,7 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) {
req = toku_brtnode_pf_req_callback(*dn, &bfe_all); req = toku_brtnode_pf_req_callback(*dn, &bfe_all);
assert(req); assert(req);
toku_brtnode_pf_callback(*dn, &bfe_all, fd, &size); toku_brtnode_pf_callback(*dn, ndd, &bfe_all, fd, &size);
toku_brtnode_pe_callback(*dn, attr, &attr, NULL); toku_brtnode_pe_callback(*dn, attr, &attr, NULL);
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
...@@ -124,7 +125,7 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -124,7 +125,7 @@ test1(int fd, struct brt_header *brt_h, BRTNODE *dn) {
for (int i = 0; i < (*dn)->n_children; i++) { for (int i = 0; i < (*dn)->n_children; i++) {
assert(BP_STATE(*dn,i) == PT_AVAIL); assert(BP_STATE(*dn,i) == PT_AVAIL);
} }
toku_free(ndd);
toku_brtnode_free(dn); toku_brtnode_free(dn);
} }
...@@ -160,8 +161,8 @@ test2(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -160,8 +161,8 @@ test2(int fd, struct brt_header *brt_h, BRTNODE *dn) {
TRUE, TRUE,
FALSE FALSE
); );
BRTNODE_DISK_DATA ndd = NULL;
int r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe_subset); int r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_subset);
assert(r==0); assert(r==0);
BOOL is_leaf = ((*dn)->height == 0); BOOL is_leaf = ((*dn)->height == 0);
// at this point, although both partitions are available, only the // at this point, although both partitions are available, only the
...@@ -182,13 +183,13 @@ test2(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -182,13 +183,13 @@ test2(int fd, struct brt_header *brt_h, BRTNODE *dn) {
BOOL req = toku_brtnode_pf_req_callback(*dn, &bfe_subset); BOOL req = toku_brtnode_pf_req_callback(*dn, &bfe_subset);
assert(req); assert(req);
toku_brtnode_pf_callback(*dn, &bfe_subset, fd, &attr); toku_brtnode_pf_callback(*dn, ndd, &bfe_subset, fd, &attr);
assert(BP_STATE(*dn, 0) == PT_AVAIL); assert(BP_STATE(*dn, 0) == PT_AVAIL);
assert(BP_STATE(*dn, 1) == PT_AVAIL); assert(BP_STATE(*dn, 1) == PT_AVAIL);
assert(BP_SHOULD_EVICT(*dn, 0)); assert(BP_SHOULD_EVICT(*dn, 0));
assert(!BP_SHOULD_EVICT(*dn, 1)); assert(!BP_SHOULD_EVICT(*dn, 1));
toku_free(ndd);
toku_brtnode_free(dn); toku_brtnode_free(dn);
} }
...@@ -206,8 +207,8 @@ test3_leaf(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -206,8 +207,8 @@ test3_leaf(int fd, struct brt_header *brt_h, BRTNODE *dn) {
&bfe_min, &bfe_min,
brt_h brt_h
); );
BRTNODE_DISK_DATA ndd = NULL;
int r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &bfe_min); int r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, dn, &ndd, &bfe_min);
assert(r==0); assert(r==0);
// //
// make sure we have a leaf // make sure we have a leaf
...@@ -217,6 +218,7 @@ test3_leaf(int fd, struct brt_header *brt_h, BRTNODE *dn) { ...@@ -217,6 +218,7 @@ test3_leaf(int fd, struct brt_header *brt_h, BRTNODE *dn) {
assert(BP_STATE(*dn, i) == PT_ON_DISK); assert(BP_STATE(*dn, i) == PT_ON_DISK);
} }
toku_brtnode_free(dn); toku_brtnode_free(dn);
toku_free(ndd);
} }
static void static void
...@@ -296,8 +298,8 @@ test_serialize_nonleaf(void) { ...@@ -296,8 +298,8 @@ test_serialize_nonleaf(void) {
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
BRTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE); r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->h, 1, 1, FALSE);
assert(r==0); assert(r==0);
test1(fd, brt_h, &dn); test1(fd, brt_h, &dn);
...@@ -309,6 +311,7 @@ test_serialize_nonleaf(void) { ...@@ -309,6 +311,7 @@ test_serialize_nonleaf(void) {
destroy_nonleaf_childinfo(BNC(&sn, 1)); destroy_nonleaf_childinfo(BNC(&sn, 1));
toku_free(sn.bp); toku_free(sn.bp);
toku_free(sn.childkeys); toku_free(sn.childkeys);
toku_free(ndd);
toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); toku_block_free(brt_h->blocktable, BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
toku_brtheader_destroy_treelock(brt_h); toku_brtheader_destroy_treelock(brt_h);
...@@ -382,8 +385,8 @@ test_serialize_leaf(void) { ...@@ -382,8 +385,8 @@ test_serialize_leaf(void) {
assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE); assert(offset == BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
assert(size == 100); assert(size == 100);
} }
BRTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE); r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->h, 1, 1, FALSE);
assert(r==0); assert(r==0);
test1(fd, brt_h, &dn); test1(fd, brt_h, &dn);
...@@ -408,7 +411,7 @@ test_serialize_leaf(void) { ...@@ -408,7 +411,7 @@ test_serialize_leaf(void) {
toku_blocktable_destroy(&brt_h->blocktable); toku_blocktable_destroy(&brt_h->blocktable);
toku_free(brt_h); toku_free(brt_h);
toku_free(brt); toku_free(brt);
toku_free(ndd);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
......
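The serialize side mirrors this: toku_serialize_brtnode_to now takes a BRTNODE_DISK_DATA out-parameter that it fills with the layout it wrote, plus one added BOOL that is TRUE in every call in these tests (the hunks do not show the parameter's name, so no meaning is assumed here). Sketch:
BRTNODE_DISK_DATA ndd = NULL;
// &ndd receives the written partition layout; the new BOOL is TRUE throughout these tests
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->h, 1, 1, FALSE);
assert(r == 0);
// ... exercise the node ...
toku_free(ndd);   // the caller owns the disk data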
...@@ -130,7 +130,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) { ...@@ -130,7 +130,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
struct timeval t[2]; struct timeval t[2];
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE); BRTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->h, 1, 1, FALSE);
assert(r==0); assert(r==0);
gettimeofday(&t[1], NULL); gettimeofday(&t[1], NULL);
double dt; double dt;
...@@ -140,7 +141,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) { ...@@ -140,7 +141,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
struct brtnode_fetch_extra bfe; struct brtnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, brt_h);
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe); BRTNODE_DISK_DATA ndd2 = NULL;
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe);
assert(r==0); assert(r==0);
gettimeofday(&t[1], NULL); gettimeofday(&t[1], NULL);
dt = (t[1].tv_sec - t[0].tv_sec) + ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC); dt = (t[1].tv_sec - t[0].tv_sec) + ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC);
...@@ -165,6 +167,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) { ...@@ -165,6 +167,8 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
toku_brtheader_destroy_treelock(brt_h); toku_brtheader_destroy_treelock(brt_h);
toku_free(brt_h); toku_free(brt_h);
toku_free(brt); toku_free(brt);
toku_free(ndd);
toku_free(ndd2);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
...@@ -259,7 +263,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) { ...@@ -259,7 +263,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
struct timeval t[2]; struct timeval t[2];
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, brt->h, 1, 1, FALSE); BRTNODE_DISK_DATA ndd = NULL;
r = toku_serialize_brtnode_to(fd, make_blocknum(20), &sn, &ndd, TRUE, brt->h, 1, 1, FALSE);
assert(r==0); assert(r==0);
gettimeofday(&t[1], NULL); gettimeofday(&t[1], NULL);
double dt; double dt;
...@@ -269,7 +274,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) { ...@@ -269,7 +274,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
struct brtnode_fetch_extra bfe; struct brtnode_fetch_extra bfe;
fill_bfe_for_full_read(&bfe, brt_h); fill_bfe_for_full_read(&bfe, brt_h);
gettimeofday(&t[0], NULL); gettimeofday(&t[0], NULL);
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &bfe); BRTNODE_DISK_DATA ndd2 = NULL;
r = toku_deserialize_brtnode_from(fd, make_blocknum(20), 0/*pass zero for hash*/, &dn, &ndd2, &bfe);
assert(r==0); assert(r==0);
gettimeofday(&t[1], NULL); gettimeofday(&t[1], NULL);
dt = (t[1].tv_sec - t[0].tv_sec) + ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC); dt = (t[1].tv_sec - t[0].tv_sec) + ((t[1].tv_usec - t[0].tv_usec) / USECS_PER_SEC);
...@@ -291,6 +297,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) { ...@@ -291,6 +297,8 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
toku_brtheader_destroy_treelock(brt_h); toku_brtheader_destroy_treelock(brt_h);
toku_free(brt_h); toku_free(brt_h);
toku_free(brt); toku_free(brt);
toku_free(ndd);
toku_free(ndd2);
r = close(fd); assert(r != -1); r = close(fd); assert(r != -1);
} }
......
...@@ -31,11 +31,11 @@ run_test (void) { ...@@ -31,11 +31,11 @@ run_test (void) {
long s1; long s1;
long s2; long s2;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
for (int i = 0; i < 20; i++) { for (int i = 0; i < 20; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0); r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); assert(r==0);
} }
...@@ -47,12 +47,12 @@ run_test (void) { ...@@ -47,12 +47,12 @@ run_test (void) {
// pin 1 and 2 // pin 1 and 2
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_begin_checkpoint(ct, NULL); r = toku_cachetable_begin_checkpoint(ct, NULL);
// mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN // mark nodes as pending a checkpoint, so that get_and_pin_nonblocking on block 1 will return TOKUDB_TRY_AGAIN
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); assert(r==0); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); assert(r==0);
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
// now we try to pin 1, and it should get evicted out from under us // now we try to pin 1, and it should get evicted out from under us
struct unlockers foo; struct unlockers foo;
foo.extra = NULL; foo.extra = NULL;
...@@ -69,6 +69,7 @@ run_test (void) { ...@@ -69,6 +69,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
NULL, NULL,
&foo &foo
); );
......
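The cachetable pin paths gain a may_modify_node BOOL, inserted between the partial-fetch callback and read_extraargs; for the nonblocking variant the unlockers struct still comes last. A sketch of the updated call shape, with the function name taken from the TOKUDB_TRY_AGAIN comment above and the leading arguments assumed to match the blocking variant:
r = toku_cachetable_get_and_pin_nonblocking(
    f1, make_blocknum(1), 1, &v1, &s1,
    def_write_callback(NULL),
    def_fetch,
    def_pf_req_callback,
    def_pf_callback,
    TRUE,   // may_modify_node: the new argument, TRUE throughout these tests
    NULL,   // read_extraargs
    &foo);  // unlockers, initialized as in the test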
...@@ -15,6 +15,7 @@ static void *pin_nonblocking(void *arg) { ...@@ -15,6 +15,7 @@ static void *pin_nonblocking(void *arg) {
&v1, &v1,
&s1, &s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
TRUE,
NULL, NULL,
NULL NULL
); );
...@@ -42,6 +43,7 @@ cachetable_test (void) { ...@@ -42,6 +43,7 @@ cachetable_test (void) {
&v1, &v1,
&s1, &s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
TRUE,
NULL NULL
); );
toku_pthread_t pin_nonblocking_tid; toku_pthread_t pin_nonblocking_tid;
......
...@@ -15,6 +15,7 @@ static void *pin_nonblocking(void *arg) { ...@@ -15,6 +15,7 @@ static void *pin_nonblocking(void *arg) {
&v1, &v1,
&s1, &s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
TRUE,
NULL, NULL,
NULL NULL
); );
...@@ -63,6 +64,7 @@ cachetable_test (void) { ...@@ -63,6 +64,7 @@ cachetable_test (void) {
&v1, &v1,
&s1, &s1,
def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback, def_write_callback(NULL), def_fetch, def_pf_req_callback, def_pf_callback,
TRUE,
NULL NULL
); );
toku_pthread_t pin_nonblocking_tid; toku_pthread_t pin_nonblocking_tid;
......
...@@ -12,12 +12,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -12,12 +12,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
flush_called = TRUE; flush_called = TRUE;
*new_size = make_pair_attr(8); *new_size = make_pair_attr(8);
...@@ -29,7 +31,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) { ...@@ -29,7 +31,7 @@ static BOOL pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
return TRUE; return TRUE;
} }
static int pf_callback(void* UU(brtnode_pv), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) { static int pf_callback(void* UU(brtnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
assert(pf_req_called); assert(pf_req_called);
assert(flush_called); assert(flush_called);
pf_called = TRUE; pf_called = TRUE;
...@@ -52,7 +54,7 @@ cachetable_test (void) { ...@@ -52,7 +54,7 @@ cachetable_test (void) {
long s1; long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
flush_called = FALSE; flush_called = FALSE;
...@@ -60,7 +62,7 @@ cachetable_test (void) { ...@@ -60,7 +62,7 @@ cachetable_test (void) {
pf_called = FALSE; pf_called = FALSE;
r = toku_cachetable_begin_checkpoint(ct, NULL); r = toku_cachetable_begin_checkpoint(ct, NULL);
assert_zero(r); assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, pf_req_callback, pf_callback, TRUE, NULL);
assert_zero(r); assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r); assert_zero(r);
......
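Partial-fetch callbacks now receive the pair's disk data as a second argument. A stub in the style of the ones above, assuming make_pair_attr from the test harness:
static int pf_callback(void* UU(brtnode_pv), void* UU(disk_data), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
    // report the post-fetch size, as the stubs in these tests do
    *sizep = make_pair_attr(8);
    return 0;
}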
...@@ -8,12 +8,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -8,12 +8,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (verbose) { printf("FLUSH: %d write_me %d\n", (int)k.b, w); } if (verbose) { printf("FLUSH: %d write_me %d\n", (int)k.b, w); }
...@@ -39,11 +41,9 @@ cachetable_test (void) { ...@@ -39,11 +41,9 @@ cachetable_test (void) {
long s1, s2; long s1, s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
// usleep (2*1024*1024);
//r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, flush, def_fetch, def_pe_est_callback, pe_callback, pf_req_callback, pf_callback, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8)); r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(8));
......
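The flush callback grows by two parameters: a void** for the pair's disk data and a trailing BOOL is_clone, TRUE when the cachetable is writing out a checkpoint clone rather than the live pair. The full no-op shape used across these tests, written out once:
static void flush(CACHEFILE UU(f), int UU(fd), CACHEKEY UU(k), void* UU(v),
                  void** UU(dd),        // new: the pair's disk data
                  void* UU(e), PAIR_ATTR UU(s), PAIR_ATTR* UU(new_size),
                  BOOL UU(write_me), BOOL UU(keep_me), BOOL UU(for_checkpoint),
                  BOOL UU(is_clone))    // new: distinguishes clone write-outs
{
    /* Do nothing */
}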
...@@ -36,12 +36,14 @@ flush ( ...@@ -36,12 +36,14 @@ flush (
int UU(fd), int UU(fd),
CACHEKEY UU(key), CACHEKEY UU(key),
void *value, void *value,
void** UU(dd),
void *UU(extraargs), void *UU(extraargs),
PAIR_ATTR size, PAIR_ATTR size,
PAIR_ATTR* UU(new_size), PAIR_ATTR* UU(new_size),
BOOL write_me, BOOL write_me,
BOOL keep_me, BOOL keep_me,
BOOL UU(for_checkpoint) BOOL UU(for_checkpoint),
BOOL UU(is_clone)
) )
{ {
// printf("f"); // printf("f");
...@@ -61,7 +63,8 @@ fetch ( ...@@ -61,7 +63,8 @@ fetch (
int UU(fd), int UU(fd),
CACHEKEY UU(key), CACHEKEY UU(key),
u_int32_t UU(fullhash), u_int32_t UU(fullhash),
void **UU(value), void **UU(value),
void **UU(dd),
PAIR_ATTR *UU(sizep), PAIR_ATTR *UU(sizep),
int *UU(dirtyp), int *UU(dirtyp),
void *UU(extraargs) void *UU(extraargs)
...@@ -84,7 +87,7 @@ do_update (void *UU(ignore)) ...@@ -84,7 +87,7 @@ do_update (void *UU(ignore))
long size; long size;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, &size, wc, fetch, def_pf_req_callback, def_pf_callback, 0); int r = toku_cachetable_get_and_pin(cf, key, hi, &vv, &size, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, 0);
//printf("g"); //printf("g");
assert(r==0); assert(r==0);
assert(size==sizeof(int)); assert(size==sizeof(int));
......
...@@ -14,12 +14,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -14,12 +14,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); } if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
...@@ -41,6 +43,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -41,6 +43,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)), u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)), void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp, int *dirtyp,
void *extraargs __attribute__((__unused__)) void *extraargs __attribute__((__unused__))
...@@ -73,9 +76,9 @@ cachetable_test (void) { ...@@ -73,9 +76,9 @@ cachetable_test (void) {
long s2; long s2;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(&dirty_val);
wc.flush_callback = flush; wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, &dirty_val); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, &dirty_val);
wc.write_extraargs = NULL; wc.write_extraargs = NULL;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
// //
// Here is the test, we have two pairs, v1 is dirty, v2 is clean, but both are currently pinned // Here is the test, we have two pairs, v1 is dirty, v2 is clean, but both are currently pinned
......
...@@ -14,12 +14,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -14,12 +14,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); } if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
...@@ -41,6 +43,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -41,6 +43,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)), u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)), void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp, int *dirtyp,
void *extraargs __attribute__((__unused__)) void *extraargs __attribute__((__unused__))
......
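Fetch callbacks pick up the matching void** dd out-parameter so a fetch can publish the disk data it builds. A minimal stub consistent with the hunks above (the body is illustrative, not from the diff):
static int fetch(CACHEFILE UU(f), int UU(fd), CACHEKEY UU(k), u_int32_t UU(fullhash),
                 void** value,
                 void** UU(dd),        // new: out-parameter for the pair's disk data
                 PAIR_ATTR* sizep, int* dirtyp, void* UU(extraargs))
{
    *value = NULL;                 // a real fetch would read and materialize the pair here
    *sizep = make_pair_attr(8);
    *dirtyp = 0;
    return 0;
}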
...@@ -12,7 +12,21 @@ static const int item_size = 1; ...@@ -12,7 +12,21 @@ static const int item_size = 1;
static int n_flush, n_write_me, n_keep_me, n_fetch; static int n_flush, n_write_me, n_keep_me, n_fetch;
static void flush(CACHEFILE cf, int UU(fd), CACHEKEY key, void *value, void *extraargs, PAIR_ATTR size, PAIR_ATTR* UU(new_size), BOOL write_me, BOOL keep_me, BOOL UU(for_checkpoint)) { static void flush(
CACHEFILE cf,
int UU(fd),
CACHEKEY key,
void *value,
void** UU(dd),
void *extraargs,
PAIR_ATTR size,
PAIR_ATTR* UU(new_size),
BOOL write_me,
BOOL keep_me,
BOOL UU(for_checkpoint),
BOOL UU(is_clone)
)
{
cf = cf; key = key; value = value; extraargs = extraargs; cf = cf; key = key; value = value; extraargs = extraargs;
// assert(key == make_blocknum((long)value)); // assert(key == make_blocknum((long)value));
assert(size.size == item_size); assert(size.size == item_size);
......
...@@ -12,12 +12,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -12,12 +12,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); } if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
...@@ -70,7 +72,7 @@ cachetable_test (void) { ...@@ -70,7 +72,7 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback; wc.cleaner_callback = cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
PAIR_ATTR attr = make_pair_attr(8); PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8; attr.cache_pressure_size = 8;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
......
...@@ -12,12 +12,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -12,12 +12,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); } if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
...@@ -70,7 +72,7 @@ cachetable_test (void) { ...@@ -70,7 +72,7 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback; wc.cleaner_callback = cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
PAIR_ATTR attr = make_pair_attr(8); PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8; attr.cache_pressure_size = 8;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
......
...@@ -11,12 +11,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -11,12 +11,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (verbose) { printf("FLUSH: %d\n", (int)k.b); } if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
...@@ -59,7 +61,7 @@ cachetable_test (void) { ...@@ -59,7 +61,7 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.cleaner_callback = cleaner_callback; wc.cleaner_callback = cleaner_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
PAIR_ATTR attr = make_pair_attr(8); PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 8; attr.cache_pressure_size = 8;
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
......
...@@ -30,12 +30,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -30,12 +30,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
PAIR_ATTR *expect = e; PAIR_ATTR *expect = e;
if (!keep) { if (!keep) {
...@@ -85,6 +87,7 @@ run_test (void) { ...@@ -85,6 +87,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
&expect); &expect);
assert_zero(r); assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_DIRTY, attrs[i]); r = toku_cachetable_unpin(f1, make_blocknum(i+1), i+1, CACHETABLE_DIRTY, attrs[i]);
...@@ -109,6 +112,7 @@ run_test (void) { ...@@ -109,6 +112,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
&expect); &expect);
toku_cachetable_unpin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, CACHETABLE_CLEAN, toku_cachetable_unpin(f1, make_blocknum(n_pairs + 1), n_pairs + 1, CACHETABLE_CLEAN,
make_pair_attr(test_limit - expect.size + 20)); make_pair_attr(test_limit - expect.size + 20));
......
...@@ -47,6 +47,7 @@ run_test (void) { ...@@ -47,6 +47,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
NULL); NULL);
assert_zero(r); assert_zero(r);
} }
......
...@@ -45,6 +45,7 @@ run_test (void) { ...@@ -45,6 +45,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
NULL); NULL);
assert_zero(r); assert_zero(r);
// set cachepressure_size to 0 // set cachepressure_size to 0
......
...@@ -52,6 +52,7 @@ run_test (void) { ...@@ -52,6 +52,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
NULL); NULL);
PAIR_ATTR attr = make_pair_attr(8); PAIR_ATTR attr = make_pair_attr(8);
attr.cache_pressure_size = 100; attr.cache_pressure_size = 100;
...@@ -63,6 +64,7 @@ run_test (void) { ...@@ -63,6 +64,7 @@ run_test (void) {
def_fetch, def_fetch,
def_pf_req_callback, def_pf_req_callback,
def_pf_callback, def_pf_callback,
TRUE,
NULL); NULL);
assert_zero(r); assert_zero(r);
// set cachepressure_size to 0 // set cachepressure_size to 0
......
...@@ -13,12 +13,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -13,12 +13,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (check_flush && !keep) { if (check_flush && !keep) {
...@@ -36,6 +38,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -36,6 +38,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)), u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)), void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp, int *dirtyp,
void *extraargs __attribute__((__unused__)) void *extraargs __attribute__((__unused__))
...@@ -66,19 +69,19 @@ cachetable_test (void) { ...@@ -66,19 +69,19 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
for (int i = 0; i < 100000; i++) { for (int i = 0; i < 100000; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
} }
for (int i = 0; i < 8; i++) { for (int i = 0; i < 8; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
} }
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
} }
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
} }
flush_may_occur = TRUE; flush_may_occur = TRUE;
......
...@@ -10,12 +10,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -10,12 +10,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v, void *v,
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep, BOOL keep,
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
assert(flush_may_occur); assert(flush_may_occur);
if (!keep) { if (!keep) {
...@@ -31,6 +33,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -31,6 +33,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)), u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)), void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp, int *dirtyp,
void *extraargs __attribute__((__unused__)) void *extraargs __attribute__((__unused__))
...@@ -48,12 +51,14 @@ other_flush (CACHEFILE f __attribute__((__unused__)), ...@@ -48,12 +51,14 @@ other_flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
} }
...@@ -103,28 +108,28 @@ cachetable_test (void) { ...@@ -103,28 +108,28 @@ cachetable_test (void) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
} }
for (int i = 0; i < 8; i++) { for (int i = 0; i < 8; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
} }
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
} }
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL); CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
} }
flush_may_occur = FALSE; flush_may_occur = FALSE;
......
...@@ -10,12 +10,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -10,12 +10,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void* UU(v), void* UU(v),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep, BOOL keep,
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
assert(flush_may_occur); assert(flush_may_occur);
if (!keep) { if (!keep) {
...@@ -31,6 +33,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -31,6 +33,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)), u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)), void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp, int *dirtyp,
void *extraargs __attribute__((__unused__)) void *extraargs __attribute__((__unused__))
...@@ -48,18 +51,21 @@ other_flush (CACHEFILE f __attribute__((__unused__)), ...@@ -48,18 +51,21 @@ other_flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
} }
static void static void
pe_est_callback( pe_est_callback(
void* UU(brtnode_pv), void* UU(brtnode_pv),
void* UU(dd),
long* bytes_freed_estimate, long* bytes_freed_estimate,
enum partial_eviction_cost *cost, enum partial_eviction_cost *cost,
void* UU(write_extraargs) void* UU(write_extraargs)
...@@ -118,7 +124,7 @@ cachetable_test (void) { ...@@ -118,7 +124,7 @@ cachetable_test (void) {
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback; wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(4));
} }
for (int i = 0; i < 8; i++) { for (int i = 0; i < 8; i++) {
...@@ -126,7 +132,7 @@ cachetable_test (void) { ...@@ -126,7 +132,7 @@ cachetable_test (void) {
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback; wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(4));
} }
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
...@@ -134,7 +140,7 @@ cachetable_test (void) { ...@@ -134,7 +140,7 @@ cachetable_test (void) {
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback; wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(4));
} }
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
...@@ -142,7 +148,7 @@ cachetable_test (void) { ...@@ -142,7 +148,7 @@ cachetable_test (void) {
wc.flush_callback = flush; wc.flush_callback = flush;
wc.pe_est_callback = pe_est_callback; wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4)); r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(4));
} }
flush_may_occur = FALSE; flush_may_occur = FALSE;
......
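The partial-eviction estimate callback likewise receives the disk data. A stub matching the hunks above; PE_CHEAP is an assumed member of enum partial_eviction_cost, not shown in this diff:
static void pe_est_callback(void* UU(brtnode_pv),
                            void* UU(dd),      // new: the pair's disk data
                            long* bytes_freed_estimate,
                            enum partial_eviction_cost* cost,
                            void* UU(write_extraargs))
{
    *bytes_freed_estimate = 0;
    *cost = PE_CHEAP;              // assumption: a stub has nothing costly to evict
}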
...@@ -23,12 +23,14 @@ flush (CACHEFILE f __attribute__((__unused__)), ...@@ -23,12 +23,14 @@ flush (CACHEFILE f __attribute__((__unused__)),
int UU(fd), int UU(fd),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)), void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)), void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)), PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)), PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)), BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)), BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)) BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
) { ) {
/* Do nothing */ /* Do nothing */
if (check_flush && !keep) { if (check_flush && !keep) {
...@@ -46,6 +48,7 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -46,6 +48,7 @@ fetch (CACHEFILE f __attribute__((__unused__)),
CACHEKEY k __attribute__((__unused__)), CACHEKEY k __attribute__((__unused__)),
u_int32_t fullhash __attribute__((__unused__)), u_int32_t fullhash __attribute__((__unused__)),
void **value __attribute__((__unused__)), void **value __attribute__((__unused__)),
void** UU(dd),
PAIR_ATTR *sizep __attribute__((__unused__)), PAIR_ATTR *sizep __attribute__((__unused__)),
int *dirtyp, int *dirtyp,
void *extraargs __attribute__((__unused__)) void *extraargs __attribute__((__unused__))
...@@ -58,7 +61,8 @@ fetch (CACHEFILE f __attribute__((__unused__)), ...@@ -58,7 +61,8 @@ fetch (CACHEFILE f __attribute__((__unused__)),
static void static void
pe_est_callback( pe_est_callback(
void* UU(brtnode_pv), void* UU(brtnode_pv),
void* UU(dd),
long* bytes_freed_estimate, long* bytes_freed_estimate,
enum partial_eviction_cost *cost, enum partial_eviction_cost *cost,
void* UU(write_extraargs) void* UU(write_extraargs)
...@@ -104,19 +108,19 @@ cachetable_test (void) { ...@@ -104,19 +108,19 @@ cachetable_test (void) {
wc.pe_est_callback = pe_est_callback; wc.pe_est_callback = pe_est_callback;
wc.pe_callback = pe_callback; wc.pe_callback = pe_callback;
for (int i = 0; i < 100000; i++) { for (int i = 0; i < 100000; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(1));
} }
for (int i = 0; i < 8; i++) { for (int i = 0; i < 8; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(2), 2, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(2), 2, CACHETABLE_CLEAN, make_pair_attr(1));
} }
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(3), 3, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(3), 3, CACHETABLE_CLEAN, make_pair_attr(1));
} }
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, NULL); r = toku_cachetable_get_and_pin(f1, make_blocknum(4), 4, &v2, &s2, wc, fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1)); r = toku_cachetable_unpin(f1, make_blocknum(4), 4, CACHETABLE_CLEAN, make_pair_attr(1));
} }
flush_may_occur = TRUE; flush_may_occur = TRUE;
......
#ident "$Id: cachetable-simple-verify.c 39504 2012-02-03 16:19:33Z zardosht $"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h"
#include "test.h"
static void
clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
new_attr->is_valid = FALSE;
}
BOOL clone_flush_started;
BOOL clone_flush_completed;
CACHETABLE ct;
static void
flush (
CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)),
BOOL is_clone
)
{
if (is_clone) {
clone_flush_started = TRUE;
usleep(4*1024*1024);
clone_flush_completed = TRUE;
}
}
static void *run_end_checkpoint(void *arg) {
int r = toku_cachetable_end_checkpoint(
ct,
NULL,
fake_ydb_lock,
fake_ydb_unlock,
NULL,
NULL
);
assert_zero(r);
return arg;
}
//
// this test verifies that a PAIR that undergoes a checkpoint on the checkpoint thread is still pinnable while being written out
//
static void
cachetable_test (void) {
const int test_limit = 200;
int r;
ct = NULL;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.flush_callback = flush;
wc.clone_callback = clone_callback;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
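// begin_checkpoint marks the dirty pair checkpoint-pending; because a
// clone_callback is set, the writeback can proceed from a clone while the
// original stays pinnable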
clone_flush_started = FALSE;
clone_flush_completed = FALSE;
toku_pthread_t checkpoint_tid;
r = toku_pthread_create(&checkpoint_tid, NULL, run_end_checkpoint, NULL);
assert_zero(r);
usleep(1*1024*1024);
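// give the checkpoint thread about a second of head start so the clone write
// is in flight when we re-pin below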
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
assert(clone_flush_started && !clone_flush_completed);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
void *ret;
r = toku_pthread_join(checkpoint_tid, &ret);
assert_zero(r);
assert(clone_flush_started && clone_flush_completed);
toku_cachetable_verify(ct);
r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
cachetable_test();
return 0;
}
#ident "$Id: cachetable-simple-verify.c 39504 2012-02-03 16:19:33Z zardosht $"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h"
#include "test.h"
BOOL flush_completed;
BOOL pf_called;
static void
clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
new_attr->is_valid = FALSE;
}
static void
flush (
CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)),
BOOL is_clone
)
{
if (is_clone) {
usleep(2*1024*1024);
flush_completed = TRUE;
}
}
static int true_pf_callback(void* UU(brtnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
assert(flush_completed);
pf_called = TRUE;
*sizep = make_pair_attr(9);
return 0;
}
// this test verifies that a partial fetch, issued via toku_cachetable_pf_pinned_pair
// on a pinned pair, will wait for a cloned pair to complete writing to disk
static void
cachetable_test (void) {
const int test_limit = 12;
int r;
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
flush_completed = FALSE;
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
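// pinning the pair for modification below should force the pending checkpoint
// write, which runs through the slow clone flush above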
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
pf_called = FALSE;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
assert(!pf_called);
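// an explicit partial fetch on the pinned pair must wait for the clone flush;
// true_pf_callback asserts flush_completed to verify the ordering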
toku_cachetable_pf_pinned_pair(v1, true_pf_callback, NULL, f1, make_blocknum(1), 1);
assert(pf_called);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
assert(pf_called);
r = toku_cachetable_end_checkpoint(
ct,
NULL,
fake_ydb_lock,
fake_ydb_unlock,
NULL,
NULL
);
assert_zero(r);
toku_cachetable_verify(ct);
r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
cachetable_test();
return 0;
}
#ident "$Id: cachetable-simple-verify.c 39504 2012-02-03 16:19:33Z zardosht $"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h"
#include "test.h"
BOOL flush_completed;
BOOL pf_called;
static void
clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
new_attr->is_valid = FALSE;
}
static void
flush (
CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)),
BOOL is_clone
)
{
if (is_clone) {
usleep(2*1024*1024);
flush_completed = TRUE;
}
}
static BOOL true_pf_req_callback(void* UU(brtnode_pv), void* UU(read_extraargs)) {
return TRUE;
}
static int true_pf_callback(void* UU(brtnode_pv), void* UU(dd), void* UU(read_extraargs), int UU(fd), PAIR_ATTR* sizep) {
assert(flush_completed);
pf_called = TRUE;
*sizep = make_pair_attr(9);
return 0;
}
// this test verifies that a partial fetch requested during get_and_pin
// will wait for a cloned pair to complete writing to disk
static void
cachetable_test (void) {
const int test_limit = 12;
int r;
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
flush_completed = FALSE;
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
pf_called = FALSE;
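// this pin requests a real partial fetch (true_pf_req_callback returns TRUE);
// the fetch must block until the clone flush finishes, which true_pf_callback asserts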
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, true_pf_req_callback, true_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert_zero(r);
assert(pf_called);
r = toku_cachetable_end_checkpoint(
ct,
NULL,
fake_ydb_lock,
fake_ydb_unlock,
NULL,
NULL
);
assert_zero(r);
toku_cachetable_verify(ct);
r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
cachetable_test();
return 0;
}
#ident "$Id: cachetable-simple-verify.c 39504 2012-02-03 16:19:33Z zardosht $"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h"
#include "test.h"
static void
clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
new_attr->is_valid = FALSE;
}
static void
flush (
CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)),
BOOL UU(is_clone)
)
{
}
// this test verifies that a nonblocking pin passing FALSE for may_modify_value
// does not stall behind a checkpoint, while one passing TRUE stalls only for a
// dirty, non-cloneable pair
static void
cachetable_test (enum cachetable_dirty dirty, BOOL cloneable) {
const int test_limit = 12;
int r;
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = cloneable ? clone_callback : NULL;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, dirty, make_pair_attr(8));
// test that having a pin that passes FALSE for may_modify_value does not stall behind checkpoint
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, FALSE, NULL, NULL);
assert(r == 0);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
assert(r == 0);
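// with may_modify_value TRUE, a nonblocking pin of a dirty, non-cloneable pair
// is expected to bounce with TOKUDB_TRY_AGAIN (the pair must first be written
// for checkpoint); a cloneable or clean pair should pin right away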
r = toku_cachetable_get_and_pin_nonblocking(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL, NULL);
if (dirty == CACHETABLE_DIRTY && !cloneable) {
assert(r == TOKUDB_TRY_AGAIN);
}
else {
assert(r == 0);
r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
}
r = toku_cachetable_end_checkpoint(
ct,
NULL,
fake_ydb_lock,
fake_ydb_unlock,
NULL,
NULL
);
assert_zero(r);
toku_cachetable_verify(ct);
r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
cachetable_test(CACHETABLE_DIRTY, TRUE);
cachetable_test(CACHETABLE_DIRTY, FALSE);
cachetable_test(CACHETABLE_CLEAN, TRUE);
cachetable_test(CACHETABLE_CLEAN, FALSE);
return 0;
}
#ident "$Id: cachetable-simple-verify.c 39504 2012-02-03 16:19:33Z zardosht $"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h"
#include "test.h"
BOOL flush_completed;
BOOL evict_called;
static void
clone_callback(void* UU(value_data), void** cloned_value_data, PAIR_ATTR* new_attr, BOOL UU(for_checkpoint), void* UU(write_extraargs))
{
*cloned_value_data = (void *)1;
new_attr->is_valid = FALSE;
}
static void
flush (
CACHEFILE f __attribute__((__unused__)),
int UU(fd),
CACHEKEY k __attribute__((__unused__)),
void *v __attribute__((__unused__)),
void** UU(dd),
void *e __attribute__((__unused__)),
PAIR_ATTR s __attribute__((__unused__)),
PAIR_ATTR* new_size __attribute__((__unused__)),
BOOL w __attribute__((__unused__)),
BOOL keep __attribute__((__unused__)),
BOOL c __attribute__((__unused__)),
BOOL is_clone
)
{
if (is_clone) {
usleep(2*1024*1024);
flush_completed = TRUE;
}
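// eviction (keep == FALSE) must only happen after the clone has been written out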
else if (!keep) {
assert(flush_completed);
evict_called = TRUE;
}
}
// this test verifies that unpin_and_remove waits for a cloned pair to complete
// writing to disk before the pair is evicted
static void
cachetable_test (void) {
const int test_limit = 12;
int r;
CACHETABLE ct;
r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
char fname1[] = __FILE__ "test1.dat";
unlink(fname1);
CACHEFILE f1;
r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
void* v1;
long s1;
CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
wc.clone_callback = clone_callback;
wc.flush_callback = flush;
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
r = toku_cachetable_unpin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), CACHETABLE_DIRTY, make_pair_attr(8));
assert_zero(r);
flush_completed = FALSE;
evict_called = FALSE;
r = toku_cachetable_begin_checkpoint(ct, NULL); assert_zero(r);
r = toku_cachetable_get_and_pin(f1, make_blocknum(1), toku_cachetable_hash(f1, make_blocknum(1)), &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, TRUE, NULL);
assert_zero(r);
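// unpin_and_remove must wait for the in-flight clone write before the pair is
// evicted; the flush callback above asserts that ordering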
r = toku_cachetable_unpin_and_remove(f1, make_blocknum(1), NULL, NULL);
assert_zero(r);
r = toku_cachetable_end_checkpoint(
ct,
NULL,
fake_ydb_lock,
fake_ydb_unlock,
NULL,
NULL
);
assert_zero(r);
toku_cachetable_verify(ct);
r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
cachetable_test();
return 0;
}