Commit 564569d0 authored by Yoni Fogel

addresses #553

create iterator for lth

git-svn-id: file:///svn/tokudb@2930 c7de825b-a66e-492c-adef-691d508d4ae1
parent ef195b7b
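The iterator this commit adds is driven by toku_lth_start_scan and toku_lth_next. Below is a minimal usage sketch, assuming only the types and functions declared in the diff that follows; the wrapper function visit_all_lock_trees and its loop body are illustrative and not part of the commit:

    /* Illustrative only: walk every toku_lock_tree* stored in the hash table. */
    static void visit_all_lock_trees(toku_lth* lth) {
        toku_lock_tree* tree;
        toku_lth_start_scan(lth);                 /* reset the iterator to the sentinel head */
        while ((tree = toku_lth_next(lth)) != NULL) {
            /* ... use tree ... */
        }
        /* Note: toku_lth_insert and toku_lth_delete invalidate an in-progress scan. */
    }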
@@ -4,8 +4,8 @@
 #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
 /**
-   \file   hash_table.h
-   \brief  Hash table
+   \file   hash_lth.h
+   \brief  Hash lth
 */
@@ -14,146 +14,156 @@
 #include <errno.h>
 #include <string.h>

-/* TODO: reallocate the hash table if it grows too big. Perhaps, use toku_get_prime in newbrt/primes.c */
+/* TODO: reallocate the hash lth if it grows too big. Perhaps, use toku_get_prime in newbrt/primes.c */
 const uint32 __toku_lth_init_size = 521;

-static inline uint32 toku__lth_hash(toku_lth* table, toku_lock_tree* key) {
+static inline uint32 toku__lth_hash(toku_lth* lth, toku_lock_tree* key) {
     size_t tmp = (size_t)key;
-    return tmp % table->array_size;
+    return tmp % lth->num_buckets;
 }

-static inline void toku__invalidate_scan(toku_lth* table) {
-    table->finger_end = TRUE;
+static inline void toku__invalidate_scan(toku_lth* lth) {
+    lth->iter_is_valid = FALSE;
 }
-int toku_lth_create(toku_lth** ptable,
+int toku_lth_create(toku_lth** plth,
                     void* (*user_malloc) (size_t),
                     void  (*user_free)   (void*),
                     void* (*user_realloc)(void*, size_t)) {
-    assert(ptable && user_malloc && user_free && user_realloc);
-    int r;
-    toku_lth* tmp = (toku_lth*)user_malloc(sizeof(*tmp));
-    if (0) { died1: user_free(tmp); return r; }
-    if (!tmp) return errno;
+    int r = ENOSYS;
+    assert(plth && user_malloc && user_free && user_realloc);
+    toku_lth* tmp = NULL;
+    tmp = (toku_lth*)user_malloc(sizeof(*tmp));
+    if (!tmp) { r = ENOMEM; goto cleanup; }
     memset(tmp, 0, sizeof(*tmp));
     tmp->malloc  = user_malloc;
     tmp->free    = user_free;
     tmp->realloc = user_realloc;
-    tmp->array_size = __toku_lth_init_size;
-    tmp->table      = (toku_lth_elt**)
-        tmp->malloc(tmp->array_size * sizeof(*tmp->table));
-    if (!tmp->table) { r = errno; goto died1; }
-    memset(tmp->table, 0, tmp->array_size * sizeof(*tmp->table));
+    tmp->num_buckets = __toku_lth_init_size;
+    tmp->buckets     = (toku_lth_elt*)
+        tmp->malloc(tmp->num_buckets * sizeof(*tmp->buckets));
+    if (!tmp->buckets) { r = ENOMEM; goto cleanup; }
+    memset(tmp->buckets, 0, tmp->num_buckets * sizeof(*tmp->buckets));
     toku__invalidate_scan(tmp);
-    *ptable = tmp;
-    return 0;
+    tmp->iter_head.next_in_iteration = &tmp->iter_head;
+    tmp->iter_head.prev_in_iteration = &tmp->iter_head;
+    *plth = tmp;
+    r = 0;
+cleanup:
+    if (r != 0) {
+        if (tmp) {
+            if (tmp->buckets) { user_free(tmp->buckets); }
+            user_free(tmp);
+        }
+    }
+    return r;
 }
-toku_lock_tree* toku_lth_find(toku_lth* table, toku_lock_tree* key) {
-    assert(table && key);
-    uint32 index = toku__lth_hash(table, key);
-    toku_lth_elt* element = table->table[index];
-    while (element && element->key != key) element = element->next;
-    return element ? element->key : NULL;
+toku_lock_tree* toku_lth_find(toku_lth* lth, toku_lock_tree* key) {
+    assert(lth && key);
+    uint32 index = toku__lth_hash(lth, key);
+    toku_lth_elt* head    = &lth->buckets[index];
+    toku_lth_elt* current = head->next_in_bucket;
+    while (current) {
+        if (current->value.hash_key == key) break;
+        current = current->next_in_bucket;
+    }
+    return current ? current->value.hash_key : NULL;
 }
-void toku_lth_start_scan(toku_lth* table) {
-    assert(table);
-    table->finger_index = 0;
-    table->finger_ptr   = table->table[table->finger_index];
-    table->finger_start = TRUE;
-    table->finger_end   = FALSE;
+void toku_lth_start_scan(toku_lth* lth) {
+    assert(lth);
+    lth->iter_curr     = &lth->iter_head;
+    lth->iter_is_valid = TRUE;
 }

-static inline toku_lth_elt* toku__lth_next(toku_lth* table) {
-    assert(table);
-    assert(!table->finger_end);
-    if (table->finger_ptr && !table->finger_start) {
-        table->finger_ptr = table->finger_ptr->next;
-    }
-    while (!table->finger_ptr && ++table->finger_index < table->array_size) {
-        table->finger_ptr = table->table[table->finger_index];
-    }
-    table->finger_start = FALSE;
-    table->finger_end   = !table->finger_ptr;
-    return table->finger_ptr;
+static inline toku_lth_elt* toku__lth_next(toku_lth* lth) {
+    assert(lth);
+    assert(lth->iter_is_valid);
+    lth->iter_curr     = lth->iter_curr->next_in_iteration;
+    lth->iter_is_valid = lth->iter_curr != &lth->iter_head;
+    return lth->iter_curr;
 }

-toku_lock_tree* toku_lth_next(toku_lth* table) {
-    assert(table);
-    toku_lth_elt* next = toku__lth_next(table);
-    return next ? next->key : NULL;
+toku_lock_tree* toku_lth_next(toku_lth* lth) {
+    assert(lth);
+    toku_lth_elt* next = toku__lth_next(lth);
+    return lth->iter_curr != &lth->iter_head ? next->value.hash_key : NULL;
 }
 /* Element MUST exist. */
-void toku_lth_delete(toku_lth* table, toku_lock_tree* key) {
-    assert(table && key);
-    toku__invalidate_scan(table);
+void toku_lth_delete(toku_lth* lth, toku_lock_tree* key) {
+    assert(lth && key);
+    toku__invalidate_scan(lth);
     /* Must have elements. */
-    assert(table->num_keys);
-    uint32 index = toku__lth_hash(table, key);
-    toku_lth_elt* element = table->table[index];
-    /* Elements of the right hash must exist. */
-    assert(element);
-    /* Case where it is the first element. */
-    if (element->key == key) {
-        table->table[index] = element->next;
-        table->free(element);
-        table->num_keys--;
-        return;
-    }
-    toku_lth_elt* prev;
-    /* Case where it is not the first element. */
-    do {
-        assert(element);
-        prev    = element;
-        element = element->next;
-    } while (element->key != key);
+    assert(lth->num_keys);
+    uint32 index = toku__lth_hash(lth, key);
+    toku_lth_elt* head    = &lth->buckets[index];
+    toku_lth_elt* prev    = head;
+    toku_lth_elt* current = prev->next_in_bucket;
+    while (current != head) {
+        if (current->value.hash_key == key) break;
+        prev    = current;
+        current = current->next_in_bucket;
+    }
     /* Must be found. */
-    assert(element);
-    prev->next = element->next;
-    table->free(element);
-    table->num_keys--;
+    assert(current != head);
+    current->prev_in_iteration->next_in_iteration = current->next_in_iteration;
+    current->next_in_iteration->prev_in_iteration = current->prev_in_iteration;
+    prev->next_in_bucket = current->next_in_bucket;
+    lth->free(current);
+    lth->num_keys--;
     return;
 }
 /* Will allow you to insert it over and over. You need to keep track. */
-int toku_lth_insert(toku_lth* table, toku_lock_tree* key) {
-    assert(table && key);
-    toku__invalidate_scan(table);
-    uint32 index = toku__lth_hash(table, key);
+int toku_lth_insert(toku_lth* lth, toku_lock_tree* key) {
+    int r = ENOSYS;
+    assert(lth && key);
+    toku__invalidate_scan(lth);
+    uint32 index = toku__lth_hash(lth, key);
     /* Allocate a new one. */
-    toku_lth_elt* element = (toku_lth_elt*)table->malloc(sizeof(*element));
-    if (!element) return errno;
+    toku_lth_elt* element = (toku_lth_elt*)lth->malloc(sizeof(*element));
+    if (!element) { r = ENOMEM; goto cleanup; }
     memset(element, 0, sizeof(*element));
-    element->key  = key;
-    element->next = table->table[index];
-    table->table[index] = element;
-    table->num_keys++;
-    return 0;
+    element->value.hash_key = key;
+    element->next_in_iteration = lth->iter_head.next_in_iteration;
+    element->prev_in_iteration = &lth->iter_head;
+    element->next_in_iteration->prev_in_iteration = element;
+    element->prev_in_iteration->next_in_iteration = element;
+    element->next_in_bucket = lth->buckets[index].next_in_bucket;
+    lth->buckets[index].next_in_bucket = element;
+    lth->num_keys++;
+    r = 0;
+cleanup:
+    return r;
 }
-void toku_lth_close(toku_lth* table) {
-    assert(table);
+void toku_lth_close(toku_lth* lth) {
+    assert(lth);
     toku_lth_elt* element;
+    toku_lth_elt* head = &lth->iter_head;
     toku_lth_elt* next = NULL;
-    toku_lth_start_scan(table);
-    next = toku__lth_next(table);
-    while (next) {
+    toku_lth_start_scan(lth);
+    next = toku__lth_next(lth);
+    while (next != head) {
         element = next;
-        next = toku__lth_next(table);
-        table->free(element);
+        next    = toku__lth_next(lth);
+        lth->free(element);
     }
-    table->free(table->table);
-    table->free(table);
+    lth->free(lth->buckets);
+    lth->free(lth);
 }
@@ -3,6 +3,9 @@
 #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."

+#if !defined(TOKU_LTH_H)
+#define TOKU_LTH_H
 /**
    \file   hash_table.h
    \brief  Hash table
@@ -14,21 +17,27 @@
 #include <brttypes.h>
 #include <locktree.h>

+typedef struct __toku_lth_value toku_lth_value;
+struct __toku_lth_value {
+    toku_lock_tree* hash_key;
+};
+
 typedef struct __toku_lth_elt toku_lth_elt;
 struct __toku_lth_elt {
-    toku_lock_tree* key;
-    toku_lth_elt*   next;
+    toku_lth_value  value;
+    toku_lth_elt*   next_in_bucket;
+    toku_lth_elt*   next_in_iteration;
+    toku_lth_elt*   prev_in_iteration;
 };

 typedef struct __toku_lth toku_lth;
 struct __toku_lth {
-    toku_lth_elt**  table;
+    toku_lth_elt*   buckets;
+    uint32          num_buckets;
     uint32          num_keys;
-    uint32          array_size;
-    uint32          finger_index;
-    toku_lth_elt*   finger_ptr;
-    BOOL            finger_start;
-    BOOL            finger_end;
+    toku_lth_elt    iter_head;
+    toku_lth_elt*   iter_curr;
+    BOOL            iter_is_valid;
     /** The user malloc function */
     void* (*malloc) (size_t);
     /** The user free function */
@@ -44,7 +53,7 @@ int toku_lth_create(toku_lth** ptable,
 toku_lock_tree* toku_lth_find (toku_lth* table, toku_lock_tree* key);

-void toku_lth_start_scan(toku_lth* table);
+void toku_lth_start_scan (toku_lth* table);
 toku_lock_tree* toku_lth_next (toku_lth* table);
@@ -53,3 +62,5 @@ void toku_lth_delete (toku_lth* table, toku_lock_tree* key);
 void toku_lth_close  (toku_lth* table);
 int  toku_lth_insert (toku_lth* table, toku_lock_tree* key);
+
+#endif
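The layout declared above threads every element onto a circular doubly-linked list anchored at the iter_head sentinel (next_in_iteration/prev_in_iteration), so a full scan visits only the num_keys live elements rather than all num_buckets buckets, and delete can unsplice an element from the iteration order in O(1). A stripped-down, standalone sketch of that splice pattern, using hypothetical names (node, list_init, list_push, list_remove) that are not part of the TokuDB sources:

    typedef struct node node;
    struct node { node* next; node* prev; /* payload omitted */ };

    /* Initialize an empty list: the sentinel points at itself. */
    static void list_init(node* sentinel) {
        sentinel->next = sentinel;
        sentinel->prev = sentinel;
    }

    /* Splice n in right after the sentinel, as toku_lth_insert does with iter_head. */
    static void list_push(node* sentinel, node* n) {
        n->next = sentinel->next;
        n->prev = sentinel;
        n->next->prev = n;
        n->prev->next = n;
    }

    /* Unlink n in O(1), as toku_lth_delete does before freeing the element. */
    static void list_remove(node* n) {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }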