Commit e2ebbb1f authored by Bradley C. Kuszmaul

Rework the way transaction ids are used in the log (xids on messages are used when inserting something into a leaf.)  Addresses #27.

git-svn-id: file:///svn/tokudb@2199 c7de825b-a66e-492c-adef-691d508d4ae1
parent 549c2c28
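
To make the new convention concrete, here is a minimal sketch of a call site after this change. It is not part of the commit; the wrapper function and the include names are assumptions, and it only restates the signatures visible in the hunks below: the PMA and log-writer entry points now take a TOKULOGGER (plus an explicit TXNID where a message carries one) instead of a TOKUTXN, which is unwrapped at the boundary.

/* Hedged sketch; the header names are guesses about this tree's layout. */
#include "log.h"
#include "pma.h"

static int example_leaf_insert (PMA pma, DBT *key, DBT *val, TOKUTXN txn,
                                FILENUM filenum, DISKOFF diskoff,
                                u_int32_t rand4fingerprint, u_int32_t *fingerprint,
                                LSN *node_lsn) {
    /* Unwrap the txn once; the layers below no longer see a TOKUTXN. */
    TOKULOGGER logger = toku_txn_logger(txn);     /* 0 if txn==0 (see the logger.c hunk) */
    TXNID      xid    = toku_txn_get_txnid(txn);  /* the xid stamped on the leaf message */
    return toku_pma_insert(pma, key, val, logger, xid,
                           filenum, diskoff,
                           rand4fingerprint, fingerprint, node_lsn);
}
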
......@@ -396,7 +396,7 @@ int toku_deserialize_brtnode_from (int fd, DISKOFF off, BRTNODE *brtnode, int fl
}
if (n_in_buf > 0) {
u_int32_t actual_sum = 0;
r = toku_pma_bulk_insert((TOKUTXN)0, (FILENUM){0}, (DISKOFF)0, result->u.l.buffer, keys, vals, n_in_buf, result->rand4fingerprint, &actual_sum, 0);
r = toku_pma_bulk_insert((TOKULOGGER)0, (FILENUM){0}, (DISKOFF)0, result->u.l.buffer, keys, vals, n_in_buf, result->rand4fingerprint, &actual_sum, 0);
if (r!=0) goto died_21;
if (actual_sum!=result->local_fingerprint) {
//fprintf(stderr, "%s:%d Corrupted checksum stored=%08x rand=%08x actual=%08x height=%d n_keys=%d\n", __FILE__, __LINE__, result->rand4fingerprint, result->local_fingerprint, actual_sum, result->height, n_in_buf);
......
This diff is collapsed.
......@@ -214,7 +214,13 @@ int toku_logger_finish (TOKULOGGER logger, struct wbuf *wbuf) {
int toku_logger_commit (TOKUTXN txn, int nosync) {
// panic handled in log_commit
int r = toku_log_commit(txn, txn->txnid64, nosync);
int r = toku_log_commit(txn->logger, txn->txnid64);
if (r!=0) goto free_and_return;
if (txn->parent && !nosync) {
r = toku_logger_fsync(txn->logger);
if (r!=0) toku_logger_panic(txn->logger, r);
}
free_and_return: /*nothing*/;
struct log_entry *item;
while ((item=txn->oldest_logentry)) {
txn->oldest_logentry = item->next;
......@@ -257,7 +263,7 @@ int toku_logger_log_fcreate (TOKUTXN txn, const char *fname, int mode) {
BYTESTRING bs;
bs.len = strlen(fname);
bs.data = (char*)fname;
return toku_log_fcreate (txn, toku_txn_get_txnid(txn), bs, mode);
return toku_log_fcreate (txn->logger, toku_txn_get_txnid(txn), bs, mode);
}
/* fopen isn't really an action. It's just for bookkeeping. We need to know the filename that goes with a filenum. */
......@@ -267,7 +273,7 @@ int toku_logger_log_fopen (TOKUTXN txn, const char * fname, FILENUM filenum) {
BYTESTRING bs;
bs.len = strlen(fname);
bs.data = (char*)fname;
return toku_log_fopen (txn,toku_txn_get_txnid(txn), bs, filenum);
return toku_log_fopen (txn->logger, toku_txn_get_txnid(txn), bs, filenum);
}
......@@ -566,6 +572,15 @@ LSN toku_txn_get_last_lsn (TOKUTXN txn) {
if (txn==0) return (LSN){0};
return txn->last_lsn;
}
LSN toku_logger_last_lsn(TOKULOGGER logger) {
LSN result=logger->lsn;
result.lsn--;
return result;
}
TOKULOGGER toku_txn_logger (TOKUTXN txn) {
return txn ? txn->logger : 0;
}
int toku_abort_logentry_commit (struct logtype_commit *le __attribute__((__unused__)), TOKUTXN txn) {
toku_logger_panic(txn->logger, EINVAL);
......
......@@ -18,6 +18,7 @@ int toku_logger_log_checkpoint (TOKULOGGER, LSN*);
void toku_logger_panic(TOKULOGGER, int/*err*/);
int toku_logger_panicked(TOKULOGGER /*logger*/);
int toku_logger_is_open(TOKULOGGER);
LSN toku_logger_last_lsn(TOKULOGGER);
int toku_logger_log_phys_add_or_delete_in_leaf (DB *db, TOKUTXN txn, DISKOFF diskoff, int is_add, const struct kv_pair *pair);
......@@ -62,6 +63,8 @@ int toku_read_and_print_logmagic (FILE *f, u_int32_t *version);
TXNID toku_txn_get_txnid (TOKUTXN);
LSN toku_txn_get_last_lsn (TOKUTXN);
TOKULOGGER toku_txn_logger (TOKUTXN txn);
static inline int toku_copy_FILENUM(FILENUM *target, FILENUM val) { *target = val; return 0; }
static inline void toku_free_FILENUM(FILENUM val __attribute__((__unused__))) {}
......
......@@ -53,61 +53,52 @@ const struct logtype logtypes[] = {
{"FILENUM", "filenum", 0},
{"LOGGEDBRTHEADER", "header", 0},
NULLFIELD}},
{"newbrtnode", 'N', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"newbrtnode", 'N', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "height", 0},
{"u_int32_t", "nodesize", 0},
{"u_int8_t", "is_dup_sort", 0},
{"u_int32_t", "rand4fingerprint", "%08x"},
NULLFIELD}},
{"changechildfingerprint", 'f', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"changechildfingerprint", 'f', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0},
{"u_int32_t", "oldfingerprint", "%08x"},
{"u_int32_t", "newfingerprint", "%08x"},
NULLFIELD}},
{"changeunnamedroot", 'u', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"DISKOFF", "oldroot", 0},
{"DISKOFF", "newroot", 0},
NULLFIELD}},
{"changenamedroot", 'n', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"changeunnamedroot", 'u', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "oldroot", 0},
{"DISKOFF", "newroot", 0},
NULLFIELD}},
{"changenamedroot", 'n', FA{{"FILENUM", "filenum", 0},
{"BYTESTRING", "name", 0},
{"DISKOFF", "oldroot", 0},
{"DISKOFF", "newroot", 0},
NULLFIELD}},
{"changeunusedmemory", 'm', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"changeunusedmemory", 'm', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "oldunused", 0},
{"DISKOFF", "newunused", 0},
NULLFIELD}},
{"addchild", 'c', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"addchild", 'c', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0}, // children scoot over
{"DISKOFF", "child", 0},
{"u_int32_t", "childfingerprint", "%08x"},
NULLFIELD}},
{"delchild", 'r', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"delchild", 'r', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0}, // children scoot over
{"DISKOFF", "child", 0},
{"u_int32_t", "childfingerprint", "%08x"},
{"BYTESTRING", "pivotkey", 0},
NULLFIELD}},
{"setchild", 'i', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"setchild", 'i', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0},
{"DISKOFF", "oldchild", 0},
{"DISKOFF", "newchild", 0},
NULLFIELD}},
{"setpivot", 'k', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"setpivot", 'k', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0},
{"BYTESTRING", "pivotkey", 0},
......@@ -116,20 +107,20 @@ const struct logtype logtypes[] = {
{"BYTESTRING", "fname", 0},
{"FILENUM", "filenum", 0},
NULLFIELD}},
{"brtdeq", 'U', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"brtdeq", 'U', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0},
{"TXNID", "xid", 0},
{"u_int32_t", "typ", 0},
{"BYTESTRING", "key", 0},
{"BYTESTRING", "data", 0},
{"u_int32_t", "oldfingerprint", "%08x"},
{"u_int32_t", "newfingerprint", "%08x"},
NULLFIELD}},
{"brtenq", 'Q', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"brtenq", 'Q', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "childnum", 0},
{"TXNID", "xid", 0},
{"u_int32_t", "typ", 0},
{"BYTESTRING", "key", 0},
{"BYTESTRING", "data", 0},
......@@ -150,14 +141,12 @@ const struct logtype logtypes[] = {
{"BYTESTRING", "key", 0},
{"BYTESTRING", "data", 0},
NULLFIELD}},
{"resizepma", 'R', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"resizepma", 'R', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "diskoff", 0},
{"u_int32_t", "oldsize", 0},
{"u_int32_t", "newsize", 0},
NULLFIELD}},
{"pmadistribute", 'M', FA{{"TXNID", "txnid", 0},
{"FILENUM", "filenum", 0},
{"pmadistribute", 'M', FA{{"FILENUM", "filenum", 0},
{"DISKOFF", "old_diskoff", 0},
{"DISKOFF", "new_diskoff", 0},
{"INTPAIRARRAY", "fromto", 0},
......@@ -252,15 +241,12 @@ void generate_log_free(void) {
void generate_log_writer (void) {
DO_LOGTYPES(lt, ({
fprintf2(cf, hf, "int toku_log_%s (TOKUTXN txn", lt->name);
fprintf2(cf, hf, "int toku_log_%s (TOKULOGGER logger", lt->name);
DO_FIELDS(ft, lt,
fprintf2(cf, hf, ", %s %s", ft->type, ft->name));
if (lt->command=='C') {
fprintf2(cf,hf, ", int nosync");
}
fprintf(hf, ");\n");
fprintf(cf, ") {\n");
fprintf(cf, " if (txn==0) return 0;\n");
fprintf(cf, " if (logger==0) return 0;\n");
fprintf(cf, " const unsigned int buflen= (+4 // len at the beginning\n");
fprintf(cf, " +1 // log command\n");
fprintf(cf, " +8 // lsn\n");
......@@ -274,48 +260,15 @@ void generate_log_writer (void) {
fprintf(cf, " wbuf_init(&wbuf, buf, buflen);\n");
fprintf(cf, " wbuf_int(&wbuf, buflen);\n");
fprintf(cf, " wbuf_char(&wbuf, '%c');\n", lt->command);
fprintf(cf, " wbuf_LSN(&wbuf, txn->logger->lsn);\n");
fprintf(cf, " txn->last_lsn = txn->logger->lsn;\n");
fprintf(cf, " txn->logger->lsn.lsn++;\n");
fprintf(cf, " wbuf_LSN(&wbuf, logger->lsn);\n");
fprintf(cf, " logger->lsn.lsn++;\n");
DO_FIELDS(ft, lt,
fprintf(cf, " wbuf_%s(&wbuf, %s);\n", ft->type, ft->name));
fprintf(cf, " int r= toku_logger_finish(txn->logger, &wbuf);\n");
fprintf(cf, " int r= toku_logger_finish(logger, &wbuf);\n");
fprintf(cf, " assert(wbuf.ndone==buflen);\n");
fprintf(cf, " toku_free(buf);\n");
if (lt->command=='C') {
fprintf(cf, " if (r!=0) return r;\n");
fprintf(cf, " // commit has some extra work to do.\n");
fprintf(cf, " if (nosync) return 0;\n");
fprintf(cf, " if (txn->parent) { // do not fsync if there is a parent. Instead append the log entries onto the parent.\n");
fprintf(cf, " if (txn->parent->oldest_logentry) txn->parent->newest_logentry->next = txn->oldest_logentry;\n");
fprintf(cf, " else txn->parent->oldest_logentry = txn->oldest_logentry;\n");
fprintf(cf, " if (txn->newest_logentry) txn->parent->newest_logentry = txn->newest_logentry;\n");
fprintf(cf, " txn->newest_logentry = txn->oldest_logentry = 0;\n");
fprintf(cf, " } else {\n");
fprintf(cf, " r = toku_logger_fsync(txn->logger);\n");
fprintf(cf, " if (r!=0) toku_logger_panic(txn->logger, r);\n");
fprintf(cf, " }\n");
fprintf(cf, " return 0;\n");
} else {
int i=0;
fprintf(cf, " struct log_entry *MALLOC(lentry);\n");
fprintf(cf, " if (lentry==0) return errno;\n");
fprintf(cf, " if (0) { died0: toku_free(lentry); return r; }\n");
fprintf(cf, " lentry->cmd = %d;\n", lt->command);
fprintf(cf, " lentry->u.%s.lsn = toku_txn_get_last_lsn(txn);\n", lt->name);
DO_FIELDS(ft, lt,
({
fprintf(cf, " r=toku_copy_%s(&lentry->u.%s.%s, %s);\n", ft->type, lt->name, ft->name, ft->name);
fprintf(cf, " if (r!=0) { if(0) { died%d: toku_free_%s(lentry->u.%s.%s); } goto died%d; }\n", i+1, ft->type, lt->name, ft->name, i);
i++;
}));
fprintf(cf, " if (0) { goto died%d; }\n", i); // Need to use that label.
fprintf(cf, " lentry->next = 0;\n");
fprintf(cf, " if (txn->oldest_logentry==0) txn->oldest_logentry = lentry;\n");
fprintf(cf, " else txn->newest_logentry->next = lentry;\n");
fprintf(cf, " txn->newest_logentry = lentry;\n");
fprintf(cf, " return r;\n");
}
fprintf(cf, " return r;\n");
fprintf(cf, "}\n\n");
}));
......
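For reference, here is roughly what the regenerated writer for the fopen record looks like under the new scheme. This is reconstructed from the generator code above; the buffer allocation, the per-field length terms, and the 'O' command byte are not shown in the hunk and are assumptions. The point of the change is in the last few lines: the LSN is read from and advanced on the logger itself, and no log_entry is queued on a transaction.

int toku_log_fopen (TOKULOGGER logger, TXNID txnid, BYTESTRING fname, FILENUM filenum) {
    if (logger==0) return 0;
    const unsigned int buflen= (+4            // len at the beginning
                                +1            // log command
                                +8            // lsn
                                +8            // txnid                  (assumed)
                                +4+fname.len  // bytestring len + bytes (assumed)
                                +4            // filenum                (assumed)
                                +8);          // trailer written by toku_logger_finish (assumed)
    struct wbuf wbuf;
    char *buf = toku_malloc(buflen);          // allocation is elided in the hunk; assumed
    if (buf==0) return errno;
    wbuf_init(&wbuf, buf, buflen);
    wbuf_int(&wbuf, buflen);
    wbuf_char(&wbuf, 'O');                    // fopen's command byte (assumed)
    wbuf_LSN(&wbuf, logger->lsn);
    logger->lsn.lsn++;                        // the logger, not the txn, owns the LSN counter
    wbuf_TXNID(&wbuf, txnid);
    wbuf_BYTESTRING(&wbuf, fname);
    wbuf_FILENUM(&wbuf, filenum);
    int r= toku_logger_finish(logger, &wbuf);
    assert(wbuf.ndone==buflen);
    toku_free(buf);
    return r;
}
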
......@@ -28,9 +28,9 @@ struct pma {
int toku_pmainternal_count_region (struct kv_pair *pairs[], int lo, int hi);
void toku_pmainternal_calculate_parameters (PMA pma);
int toku_pmainternal_smooth_region (TOKUTXN, FILENUM, DISKOFF, struct kv_pair */*pairs*/[], int /*n*/, int /*idx*/, int /*base*/, PMA /*pma*/, int */*new_idx*/, LSN */*node_lsn*/);
int toku_pmainternal_smooth_region (TOKULOGGER, FILENUM, DISKOFF, struct kv_pair */*pairs*/[], int /*n*/, int /*idx*/, int /*base*/, PMA /*pma*/, int */*new_idx*/, LSN */*node_lsn*/);
int toku_pmainternal_printpairs (struct kv_pair *pairs[], int N);
int toku_pmainternal_make_space_at (TOKUTXN, FILENUM, DISKOFF, PMA pma, int idx, unsigned int *new_index, LSN *node_lsn);
int toku_pmainternal_make_space_at (TOKULOGGER, FILENUM, DISKOFF, PMA pma, int idx, unsigned int *new_index, LSN *node_lsn);
int toku_pmainternal_find (PMA pma, DBT *); // The DB is there so the comparison function can be called.
void toku_print_pma (PMA pma); /* useful for debugging, so keep the name short. I.e., not pmainternal_print_pma() */
......
......@@ -16,12 +16,14 @@
/* we use pma cursors for testing the pma_search function. otherwise, there are no pma cursors */
#include "pma-cursor.h"
static TOKULOGGER const null_logger = 0;
static TOKUTXN const null_txn = 0;
static DB * const null_db = 0;
static const DISKOFF null_diskoff = -1;
static const FILENUM null_filenum = {0};
static TXNID const null_txnid = 0;
#define NULL_ARGS null_txn, null_filenum, null_diskoff
#define NULL_ARGS null_logger, null_txnid, null_filenum, null_diskoff
void *skey=0, *sval=0;
......@@ -47,7 +49,7 @@ static void test_make_space_at (void) {
r=toku_pma_create(&pma, toku_default_compare_fun, null_db, null_filenum, 0);
assert(r==0);
assert(toku_pma_n_entries(pma)==0);
r=toku_pmainternal_make_space_at(null_txn, null_filenum, null_diskoff, pma, 2, &newi, (LSN*)0);
r=toku_pmainternal_make_space_at(null_logger, null_filenum, null_diskoff, pma, 2, &newi, (LSN*)0);
assert(r==0);
assert(toku_pma_index_limit(pma)==4);
assert((unsigned long)pma->pairs[toku_pma_index_limit(pma)]==0xdeadbeefL);
......@@ -55,7 +57,7 @@ static void test_make_space_at (void) {
pma->pairs[2] = key_A;
pma->n_pairs_present++;
r=toku_pmainternal_make_space_at(null_txn, null_filenum, null_diskoff, pma, 2, &newi, (LSN*)0);
r=toku_pmainternal_make_space_at(null_logger, null_filenum, null_diskoff, pma, 2, &newi, (LSN*)0);
assert(r==0);
if (verbose) printf("Requested space at 2, got space at %d\n", newi);
if (verbose) toku_print_pma(pma);
......@@ -69,7 +71,7 @@ static void test_make_space_at (void) {
pma->pairs[3] = 0;
pma->n_pairs_present=2;
if (verbose) toku_print_pma(pma);
toku_pmainternal_make_space_at(null_txn, null_filenum, null_diskoff, pma, 0, &newi, (LSN*)0);
toku_pmainternal_make_space_at(null_logger, null_filenum, null_diskoff, pma, 0, &newi, (LSN*)0);
assert(r==0);
if (verbose) printf("Requested space at 0, got space at %d\n", newi);
if (verbose) toku_print_pma(pma);
......@@ -86,7 +88,7 @@ static void test_make_space_at (void) {
pma->pairs[7] = 0;
pma->n_pairs_present=2;
if (verbose) toku_print_pma(pma);
r=toku_pmainternal_make_space_at(null_txn, null_filenum, null_diskoff, pma, 5, &newi, (LSN*)0);
r=toku_pmainternal_make_space_at(null_logger, null_filenum, null_diskoff, pma, 5, &newi, (LSN*)0);
assert(r==0);
if (verbose) toku_print_pma(pma);
if (verbose) printf("r=%d\n", newi);
......@@ -180,7 +182,7 @@ static void test_smooth_region_N (int N) {
}
}
if (verbose) { toku_pmainternal_printpairs(pairs, N); printf(" at %d becomes f", insertat); }
toku_pmainternal_smooth_region(null_txn, null_filenum, null_diskoff, pairs, N, insertat, 0, 0, &r, (LSN*)0);
toku_pmainternal_smooth_region(null_logger, null_filenum, null_diskoff, pairs, N, insertat, 0, 0, &r, (LSN*)0);
if (verbose) { toku_pmainternal_printpairs(pairs, N); printf(" at %d\n", r); }
assert(0<=r); assert(r<N);
assert(pairs[r]==0);
......@@ -222,7 +224,7 @@ static void test_smooth_region6 (void) {
pairs[1] = kv_pair_malloc(key, strlen(key)+1, 0, 0);
int r;
toku_pmainternal_smooth_region(null_txn, null_filenum, null_diskoff, pairs, N, 2, 0, 0, &r, (LSN*)0);
toku_pmainternal_smooth_region(null_logger, null_filenum, null_diskoff, pairs, N, 2, 0, 0, &r, (LSN*)0);
if (verbose) {
printf("{ ");
for (i=0; i<N; i++)
......@@ -921,10 +923,10 @@ static void test_pma_dup_split_n(int n, int dup_mode) {
/* split the pma */
DBT splitk;
r = toku_pma_split(null_txn, null_filenum,
null_diskoff, pmaa, 0, arand, &asum, (LSN*)0,
&splitk,
null_diskoff, pmac, 0, crand, &csum, (LSN*)0);
r = toku_pma_split(null_logger, null_filenum,
null_diskoff, pmaa, 0, arand, &asum, (LSN*)0,
&splitk,
null_diskoff, pmac, 0, crand, &csum, (LSN*)0);
assert(r == 0);
toku_pma_verify(pmaa);
......@@ -1044,10 +1046,10 @@ static void test_pma_split_varkey(void) {
if (verbose) { printf("a:"); toku_print_pma(pmaa); }
r = toku_pma_split(null_txn, null_filenum,
null_diskoff, pmaa, 0, arand, &asum, (LSN*)0,
0,
null_diskoff, pmac, 0, crand, &csum, (LSN*)0);
r = toku_pma_split(null_logger, null_filenum,
null_diskoff, pmaa, 0, arand, &asum, (LSN*)0,
0,
null_diskoff, pmac, 0, crand, &csum, (LSN*)0);
assert(r == 0);
toku_pma_verify(pmaa);
toku_pma_verify(pmac);
......@@ -1192,7 +1194,7 @@ static void test_pma_bulk_insert_n(int n) {
}
/* bulk insert n kv pairs */
r = toku_pma_bulk_insert(null_txn, null_filenum, (DISKOFF)0, pma, keys, vals, n, rand4fingerprint, &sum, 0);
r = toku_pma_bulk_insert(null_logger, null_filenum, (DISKOFF)0, pma, keys, vals, n, rand4fingerprint, &sum, 0);
assert(r == 0);
assert(sum==expect_fingerprint);
toku_pma_verify(pma);
......
This diff is collapsed.
......@@ -48,7 +48,7 @@ int toku_pma_n_entries (PMA);
//enum pma_errors toku_pma_insert (PMA, bytevec key, ITEMLEN keylen, bytevec data, ITEMLEN datalen);
// The DB pointer is there so that the comparison function can be called.
enum pma_errors toku_pma_insert (PMA, DBT*, DBT*, TOKUTXN, FILENUM, DISKOFF, u_int32_t /*random for fingerprint */, u_int32_t */*fingerprint*/, LSN *node_lsn);
enum pma_errors toku_pma_insert (PMA, DBT*, DBT*, TOKULOGGER, TXNID, FILENUM, DISKOFF, u_int32_t /*random for fingerprint */, u_int32_t */*fingerprint*/, LSN *node_lsn);
/* This returns an error if the key is NOT present. */
int pma_replace (PMA, bytevec key, ITEMLEN keylen, bytevec data, ITEMLEN datalen);
......@@ -61,7 +61,7 @@ int toku_pma_delete (PMA, DBT */*key*/, DBT */*val*/, u_int32_t /*random for fin
int toku_pma_insert_or_replace (PMA /*pma*/, DBT */*k*/, DBT */*v*/,
int */*replaced_v_size*/, /* If it is a replacement, set to the size of the old value, otherwise set to -1. */
TOKUTXN /*txn*/, FILENUM, DISKOFF,
TOKULOGGER, TXNID, FILENUM, DISKOFF,
u_int32_t /*random for fingerprint*/, u_int32_t */*fingerprint*/,
LSN */*node_lsn*/);
......@@ -90,7 +90,7 @@ int toku_pma_search(PMA, brt_search_t *, DBT *, DBT *);
* The original PMA gets keys <= pivot key
* The NEWPMA gets keys > pivot key
*/
int toku_pma_split(TOKUTXN, FILENUM,
int toku_pma_split(TOKULOGGER, FILENUM,
DISKOFF /*diskoff*/, PMA /*pma*/, unsigned int */*pma_size*/, u_int32_t /*rand4sum*/, u_int32_t */*fingerprint*/, LSN* /*lsn*/,
DBT */*splitk*/,
DISKOFF /*newdiskoff*/, PMA /*newpma*/, unsigned int */*newpma_size*/, u_int32_t /*newrand4sum*/, u_int32_t */*newfingerprint*/, LSN* /*newlsn*/);
......@@ -106,7 +106,7 @@ int toku_pma_split(TOKUTXN, FILENUM,
* vals - an array of values
* n_newpairs - the number of key value pairs
*/
int toku_pma_bulk_insert(TOKUTXN, FILENUM, DISKOFF, PMA pma, DBT *keys, DBT *vals, int n_newpairs, u_int32_t rand4sem, u_int32_t *fingerprint, LSN */*node_lsn*/);
int toku_pma_bulk_insert(TOKULOGGER, FILENUM, DISKOFF, PMA pma, DBT *keys, DBT *vals, int n_newpairs, u_int32_t rand4sem, u_int32_t *fingerprint, LSN */*node_lsn*/);
int toku_pma_random_pick(PMA, bytevec *key, ITEMLEN *keylen, bytevec *data, ITEMLEN *datalen);
......
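As a usage note, here is a hedged sketch of splitting one PMA into another with the logger-first toku_pma_split declared above; the wrapper is hypothetical and simply mirrors the updated calls in the pma-test diff, where a zero TOKULOGGER and null LSN pointers mean "don't log".

static int example_split (PMA pma, PMA newpma, FILENUM filenum,
                          DISKOFF diskoff, DISKOFF newdiskoff,
                          u_int32_t rand4fp, u_int32_t *fingerprint,
                          u_int32_t newrand4fp, u_int32_t *newfingerprint,
                          DBT *splitk) {
    /* Do not log: pass a zero logger and null node LSNs, as the tests do. */
    return toku_pma_split((TOKULOGGER)0, filenum,
                          diskoff,    pma,    0, rand4fp,    fingerprint,    (LSN*)0,
                          splitk,
                          newdiskoff, newpma, 0, newrand4fp, newfingerprint, (LSN*)0);
}
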