Commit 11bf6c52 authored by Rich Prohaska

mempool testing

git-svn-id: file:///svn/tokudb@444 c7de825b-a66e-492c-adef-691d508d4ae1
parent e215a35f
......@@ -9,15 +9,29 @@ void mempool_init(struct mempool *mp, void *base, int size) {
mp->size = size;
mp->free_offset = 0;
mp->frag_size = 0;
mp->compress_func = 0;
mp->compress_arg = 0;
}
void mempool_fini(struct mempool *mp __attribute__((unused))) {
// printf("mempool_fini %p %p %d %d\n", mp, mp->base, mp->size, mp->frag_size);
}
void mempool_get_base_size(struct mempool *mp, void **base_ptr, int *size_ptr) {
*base_ptr = mp->base;
*size_ptr = mp->size;
void mempool_set_compress_func(struct mempool *mp, mempool_compress_func compress_func, void *compress_arg) {
mp->compress_func = compress_func;
mp->compress_arg = compress_arg;
}
void mempool_call_compress_func(struct mempool *mp) {
mp->compress_func(mp, mp->compress_arg);
}
void *mempool_get_base(struct mempool *mp) {
return mp->base;
}
int mempool_get_size(struct mempool *mp) {
return mp->size;
}
int mempool_get_frag_size(struct mempool *mp) {
......
#ifndef _TOKU_MEMPOOL_H
#define _TOKU_MEMPOOL_H
/* a memory pool is a contiguous region of memory that supports single
allocations from the pool. these allocated regions are never recycled.
when the memory pool no longer has free space, the allocated chunks
must be relocated by the application to a new memory pool. */
struct mempool;
typedef int (*mempool_compress_func)(struct mempool *mp, void *arg);
struct mempool {
void *base; /* the base address of the memory */
int free_offset; /* the offset of the memory pool free space */
int size; /* the size of the memory */
int frag_size; /* the size of the fragmented memory */
mempool_compress_func compress_func;
void *compress_arg;
};
/* initialize the memory pool with the base address and size of a
......@@ -17,8 +26,15 @@ void mempool_init(struct mempool *mp, void *base, int size);
/* finalize the memory pool */
void mempool_fini(struct mempool *mp);
/* get the base address and size of the memory pool */
void mempool_get_base_size(struct mempool *mp, void **base_ptr, int *size_ptr);
/* set the compress callback and its argument for the memory pool */
void mempool_set_compress_func(struct mempool *mp, mempool_compress_func compress_func, void *compress_arg);
/* invoke the compress callback on the memory pool */
void mempool_call_compress_func(struct mempool *mp);
/* get the base address of the memory pool */
void *mempool_get_base(struct mempool *mp);
/* get the size of the memory pool */
int mempool_get_size(struct mempool *mp);
/* get the amount of fragmented space in the memory pool */
int mempool_get_frag_size(struct mempool *mp);
......@@ -30,3 +46,5 @@ void *mempool_malloc(struct mempool *mp, int size, int alignment);
a count of the amount of free space in the memory pool. the memory
pool does not keep track of the locations of the free chunks */
void mempool_mfree(struct mempool *mp, void *vp, int size);
#endif
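The declarations above make up the whole mempool interface touched by this commit. A minimal usage sketch follows; it is illustrative only: the pool size, the do-nothing callback, the header file name, and the use of plain malloc/free rather than toku_malloc/toku_free are assumptions, not part of the commit.

#include <assert.h>
#include <stdlib.h>
#include "mempool.h"   /* assumed header name for the declarations above */

/* illustrative callback: a real one would copy live chunks into a fresh
   pool; this sketch just reports that nothing could be reclaimed */
static int no_compress(struct mempool *mp __attribute__((unused)), void *arg __attribute__((unused))) {
    return -1;
}

static void mempool_usage_sketch(void) {
    void *base = malloc(1024);
    assert(base);

    struct mempool mp;
    mempool_init(&mp, base, 1024);
    mempool_set_compress_func(&mp, no_compress, 0);

    void *p = mempool_malloc(&mp, 16, 4);   /* 16 bytes, 4-byte aligned */
    assert(p);
    mempool_mfree(&mp, p, 16);              /* per the comment above, only the free-space count is updated */

    void *b = mempool_get_base(&mp);        /* fetch the base before tearing down the pool */
    mempool_fini(&mp);
    free(b);
}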
......@@ -19,6 +19,12 @@
/* get KEY_VALUE_OVERHEAD */
#include "brt-internal.h"
#ifndef PMA_USE_MEMPOOL
#define PMA_USE_MEMPOOL 1
#endif
#if PMA_USE_MEMPOOL
/* allocate a kv pair from the pma kv memory pool */
static struct kv_pair *kv_pair_malloc_mempool(void *key, int keylen, void *val, int vallen, struct mempool *mp) {
struct kv_pair *kv = mempool_malloc(mp, sizeof (struct kv_pair) + keylen + vallen, 4);
......@@ -55,20 +61,30 @@ static int pma_compress_kvspace(PMA pma) {
return 0;
}
#endif
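The body of pma_compress_kvspace is elided in the hunk above. As context for the retry logic below, here is a hedged sketch of what such a compress pass could look like: copy every live kv pair into a freshly allocated pool of the same size and swap it in. The sketch reuses only names visible elsewhere in this diff (kv_pair_ptr, kv_pair_size, the mempool accessors, toku_malloc/toku_free); the function name is made up, the deleted-pointer tagging on pairs[] entries is ignored, and this is not the actual TokuDB implementation.

/* hedged sketch of a compress pass: move live kv pairs into a new pool of
   the same size and release the old pool.  relies on pma.c's existing
   includes; ignores the deleted-bit tag on pma->pairs[] entries. */
static int example_compress_kvspace(PMA pma) {
    int size = mempool_get_size(&pma->kvspace);
    void *newbase = toku_malloc(size);
    if (newbase == 0)
        return -1;
    struct mempool newspace;
    mempool_init(&newspace, newbase, size);
    int i;
    for (i = 0; i < pma->N; i++) {
        struct kv_pair *kv = pma->pairs[i];
        if (kv == 0)
            continue;
        kv = kv_pair_ptr(kv);
        struct kv_pair *newkv = mempool_malloc(&newspace, kv_pair_size(kv), 4);
        assert(newkv);                       /* the new pool is as large as the old one */
        memcpy(newkv, kv, kv_pair_size(kv)); /* copy header, key, and value bytes */
        pma->pairs[i] = newkv;
    }
    void *oldbase = mempool_get_base(&pma->kvspace);
    mempool_fini(&pma->kvspace);
    toku_free(oldbase);
    pma->kvspace = newspace;
    return 0;
}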
/* malloc space for a kv pair from the pma memory pool and initialize it.
if the allocation fails, try to compress the memory pool and try again. */
static struct kv_pair *pma_malloc_kv_pair(PMA pma, void *k, int ksize, void *v, int vsize) {
static struct kv_pair *pma_malloc_kv_pair(PMA pma __attribute__((unused)), void *k, int ksize, void *v, int vsize) {
#if PMA_USE_MEMPOOL
struct kv_pair *kv = kv_pair_malloc_mempool(k, ksize, v, vsize, &pma->kvspace);
if (kv == 0) {
if (0 == pma_compress_kvspace(pma))
kv = kv_pair_malloc_mempool(k, ksize, v, vsize, &pma->kvspace);
}
#else
struct kv_pair *kv = kv_pair_malloc(k, ksize, v, vsize);
#endif
return kv;
}
static void pma_mfree_kv_pair(PMA pma, struct kv_pair *kv) {
static void pma_mfree_kv_pair(PMA pma __attribute__((unused)), struct kv_pair *kv) {
kv = kv_pair_ptr(kv);
#if PMA_USE_MEMPOOL
mempool_mfree(&pma->kvspace, kv, kv_pair_size(kv));
#else
kv_pair_free(kv);
#endif
}
int pma_n_entries (PMA pma) {
......@@ -375,9 +391,10 @@ int pma_create(PMA *pma, int (*compare_fun)(DB*,const DBT*,const DBT*), int maxs
if (maxsize == 0)
maxsize = 4*1024;
maxsize = maxsize + maxsize/4;
#if PMA_USE_MEMPOOL
void *mpbase = toku_malloc(maxsize); assert(mpbase);
mempool_init(&result->kvspace, mpbase, maxsize);
#endif
*pma = result;
assert((unsigned long)result->pairs[result->N]==0xdeadbeefL);
return 0;
......@@ -696,8 +713,11 @@ int pma_free (PMA *pmap) {
}
}
assert(pma->n_pairs_present == 0);
#if PMA_USE_MEMPOOL
void *mpbase = mempool_get_base(&pma->kvspace);
mempool_fini(&pma->kvspace);
void *mpbase; int mpsize; mempool_get_base_size(&pma->kvspace, &mpbase, &mpsize); toku_free(mpbase);
toku_free(mpbase);
#endif
toku_free(pma->pairs);
if (pma->skey) toku_free(pma->skey);
if (pma->sval) toku_free(pma->sval);
......@@ -714,6 +734,7 @@ int pma_insert (PMA pma, DBT *k, DBT *v, DB* db, TOKUTXN txn, diskoff diskoff) {
struct kv_pair *kv = kv_pair_ptr(pma->pairs[idx]);
if (0==pma->compare_fun(db, k, fill_dbt(&k2, kv->key, kv->keylen))) {
if (kv_pair_deleted(pma->pairs[idx])) {
pma_mfree_kv_pair(pma, pma->pairs[idx]);
pma->pairs[idx] = pma_malloc_kv_pair(pma, k->data, k->size, v->data, v->size);
assert(pma->pairs[idx]);
int r = tokulogger_log_phys_add_or_delete_in_leaf(txn, diskoff, 0, pma->pairs[idx]);
......@@ -851,7 +872,7 @@ int pma_insert_or_replace (PMA pma, DBT *k, DBT *v,
if (v->size == (unsigned int) kv_pair_vallen(kv)) {
memcpy(kv_pair_val(kv), v->data, v->size);
} else {
mempool_mfree(&pma->kvspace, kv, kv_pair_size(kv));
pma_mfree_kv_pair(pma, kv);
pma->pairs[idx] = pma_malloc_kv_pair(pma, k->data, k->size, v->data, v->size);
assert(pma->pairs[idx]);
}
......@@ -969,6 +990,8 @@ struct kv_pair_tag *__pma_extract_pairs(PMA pma, int npairs, int lo, int hi) {
return pairs;
}
#if PMA_USE_MEMPOOL
static void __pma_relocate_kvpairs(PMA pma) {
int i;
for (i=0; i<pma->N; i++) {
......@@ -981,6 +1004,8 @@ static void __pma_relocate_kvpairs(PMA pma) {
}
}
#endif
int pma_split(PMA origpma, unsigned int *origpma_size,
PMA leftpma, unsigned int *leftpma_size,
PMA rightpma, unsigned int *rightpma_size) {
......@@ -1037,7 +1062,9 @@ int pma_split(PMA origpma, unsigned int *origpma_size,
error = __pma_resize_array(leftpma, n + n/4, 0);
assert(error == 0);
distribute_data(leftpma->pairs, pma_index_limit(leftpma), &pairs[0], n, leftpma);
#if PMA_USE_MEMPOOL
__pma_relocate_kvpairs(leftpma);
#endif
__pma_update_cursors(leftpma, &cursors, &pairs[0], spliti);
leftpma->n_pairs_present = spliti;
......@@ -1046,7 +1073,9 @@ int pma_split(PMA origpma, unsigned int *origpma_size,
error = __pma_resize_array(rightpma, n + n/4, 0);
assert(error == 0);
distribute_data(rightpma->pairs, pma_index_limit(rightpma), &pairs[spliti], n, rightpma);
#if PMA_USE_MEMPOOL
__pma_relocate_kvpairs(rightpma);
#endif
__pma_update_cursors(rightpma, &cursors, &pairs[spliti], n);
rightpma->n_pairs_present = n;
......@@ -1117,8 +1146,12 @@ int pma_bulk_insert(PMA pma, DBT *keys, DBT *vals, int n_newpairs) {
}
for (i=0; i<n_newpairs; i++) {
#if PMA_USE_MEMPOOL
newpairs[i].pair = kv_pair_malloc_mempool(keys[i].data, keys[i].size,
vals[i].data, vals[i].size, &pma->kvspace);
#else
newpairs[i].pair = kv_pair_malloc(keys[i].data, keys[i].size, vals[i].data, vals[i].size);
#endif
if (newpairs[i].pair == 0) {
__pma_bulk_cleanup(pma, newpairs, i);
toku_free(newpairs);
......