Commit 8eaa912f authored by Rich Prohaska's avatar Rich Prohaska Committed by Yoni Fogel

merge generalized worker threads from the 1183 branch. addresses #1183

git-svn-id: file:///svn/toku/tokudb@8902 c7de825b-a66e-492c-adef-691d508d4ae1
parent c730d763
...@@ -54,6 +54,7 @@ BRT_SOURCES = \ ...@@ -54,6 +54,7 @@ BRT_SOURCES = \
recover \ recover \
roll \ roll \
threadpool \ threadpool \
toku_worker \
trace_mem \ trace_mem \
x1764 \ x1764 \
ybt \ ybt \
......
// When objects are evicted from the cachetable, they are written to storage by a
// thread in a thread pool.  The objects are placed onto a write queue that feeds
// the thread pool.  The write queue expects that an external mutex is used to
// protect it: the enqueue/dequeue/flow-control operations must be called with
// that mutex held.
typedef struct writequeue *WRITEQUEUE;
struct writequeue {
    PAIR head, tail;                // head and tail of the singly linked list of pairs (linked via next_wq)
    toku_pthread_cond_t wait_read;  // consumers block here while the queue is empty
    int want_read;                  // number of threads blocked in writequeue_deq
    toku_pthread_cond_t wait_write; // producers block here for flow control
    int want_write;                 // number of threads blocked in writequeue_wait_write
    int ninq;                       // number of pairs currently in the queue
    char closed;                    // when set, kicks waiting threads off of the write queue
};
// Set up an empty write queue.
// Expects: *wq is uninitialized storage.
// Effects: the list and counters are zeroed and both condition variables are created.
static void writequeue_init(WRITEQUEUE wq) {
    wq->head = 0;
    wq->tail = 0;
    wq->want_read = 0;
    wq->want_write = 0;
    wq->ninq = 0;
    wq->closed = 0;
    int r = toku_pthread_cond_init(&wq->wait_read, 0);
    assert(r == 0);
    r = toku_pthread_cond_init(&wq->wait_write, 0);
    assert(r == 0);
}
// Tear down a write queue.
// Expects: the write queue is initialized and no pairs remain on it.
static void writequeue_destroy(WRITEQUEUE wq) {
    assert(wq->head == 0);
    assert(wq->tail == 0);
    int r = toku_pthread_cond_destroy(&wq->wait_read);
    assert(r == 0);
    r = toku_pthread_cond_destroy(&wq->wait_write);
    assert(r == 0);
}
// Close the write queue.
// Effects: any thread blocked on either condition variable is woken so it can
// observe the closed flag and back out.
static void writequeue_set_closed(WRITEQUEUE wq) {
    wq->closed = 1;
    int r = toku_pthread_cond_broadcast(&wq->wait_read);
    assert(r == 0);
    r = toku_pthread_cond_broadcast(&wq->wait_write);
    assert(r == 0);
}
// Test whether the write queue holds no pairs.
// Returns: 1 when empty, 0 otherwise.
static int writequeue_empty(WRITEQUEUE wq) {
    return !wq->head;
}
// Append a pair to the tail of the write queue.
// Expects: the external mutex is held by the caller.
// Effects: the pair becomes the new tail, and one blocked reader (if any) is signaled.
static void writequeue_enq(WRITEQUEUE wq, PAIR pair) {
    pair->next_wq = 0;
    if (wq->tail == 0) {
        wq->head = pair;       // queue was empty
    } else {
        wq->tail->next_wq = pair;
    }
    wq->tail = pair;
    wq->ninq++;
    if (wq->want_read) {
        int r = toku_pthread_cond_signal(&wq->wait_read);
        assert(r == 0);
    }
}
// Remove the pair at the head of the write queue.
// Expects: *mutex is held by the caller; it is the mutex protecting this queue.
// Effects: blocks (releasing *mutex) while the queue is empty and not closed.
// Returns: 0 and sets *pairptr on success; EINVAL if the queue is closed and drained.
static int writequeue_deq(WRITEQUEUE wq, toku_pthread_mutex_t *mutex, PAIR *pairptr) {
    while (writequeue_empty(wq)) {
        if (wq->closed)
            return EINVAL;
        wq->want_read++;
        int r = toku_pthread_cond_wait(&wq->wait_read, mutex);
        assert(r == 0);
        wq->want_read--;
    }
    PAIR first = wq->head;
    wq->head = first->next_wq;
    if (wq->head == 0)
        wq->tail = 0;
    wq->ninq--;
    first->next_wq = 0;
    *pairptr = first;
    return 0;
}
// Block the calling writer thread on the flow-control condition.
// Expects: *mutex is held by the caller; it is released for the duration of the wait.
static void writequeue_wait_write(WRITEQUEUE wq, toku_pthread_mutex_t *mutex) {
    wq->want_write++;
    int r = toku_pthread_cond_wait(&wq->wait_write, mutex);
    assert(r == 0);
    wq->want_write--;
}
// Wake every writer thread blocked in writequeue_wait_write.
// Expects: the external mutex is held by the caller.
static void writequeue_wakeup_write(WRITEQUEUE wq) {
    if (wq->want_write == 0)
        return;                 // nobody is waiting; skip the broadcast
    int r = toku_pthread_cond_broadcast(&wq->wait_write);
    assert(r == 0);
}
This diff is collapsed.
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#include <fcntl.h> #include <fcntl.h>
#include "brttypes.h" #include "brttypes.h"
#include "workqueue.h"
// Maintain a cache mapping from cachekeys to values (void*) // Maintain a cache mapping from cachekeys to values (void*)
// Some of the keys can be pinned. Don't pin too many or for too long. // Some of the keys can be pinned. Don't pin too many or for too long.
...@@ -49,6 +50,10 @@ int toku_cachetable_openf (CACHEFILE *,CACHETABLE, const char */*fname*/, int fl ...@@ -49,6 +50,10 @@ int toku_cachetable_openf (CACHEFILE *,CACHETABLE, const char */*fname*/, int fl
// Bind a file to a new cachefile object. // Bind a file to a new cachefile object.
int toku_cachetable_openfd (CACHEFILE *,CACHETABLE, int /*fd*/, const char */*fname (used for logging)*/); int toku_cachetable_openfd (CACHEFILE *,CACHETABLE, int /*fd*/, const char */*fname (used for logging)*/);
// Get access to the asynchronous work queue
// Returns: a pointer to the work queue
WORKQUEUE toku_cachetable_get_workqueue(CACHETABLE);
// The flush callback is called when a key value pair is being written to storage and possibly removed from the cachetable. // The flush callback is called when a key value pair is being written to storage and possibly removed from the cachetable.
// When write_me is true, the value should be written to storage. // When write_me is true, the value should be written to storage.
// When keep_me is false, the value should be freed. // When keep_me is false, the value should be freed.
...@@ -65,6 +70,7 @@ void toku_cachefile_set_userdata(CACHEFILE cf, void *userdata, int (*close_userd ...@@ -65,6 +70,7 @@ void toku_cachefile_set_userdata(CACHEFILE cf, void *userdata, int (*close_userd
// Effect: Store some cachefile-specific user data. When the last reference to a cachefile is closed, we call close_userdata(). // Effect: Store some cachefile-specific user data. When the last reference to a cachefile is closed, we call close_userdata().
// When the cachefile needs to be checkpointed, we call checkpoint_userdata(). // When the cachefile needs to be checkpointed, we call checkpoint_userdata().
// If userdata is already non-NULL, then we simply overwrite it. // If userdata is already non-NULL, then we simply overwrite it.
void *toku_cachefile_get_userdata(CACHEFILE); void *toku_cachefile_get_userdata(CACHEFILE);
// Effect: Get the user data. // Effect: Get the user data.
...@@ -97,8 +103,10 @@ int toku_cachetable_get_and_pin(CACHEFILE, CACHEKEY, u_int32_t /*fullhash*/, ...@@ -97,8 +103,10 @@ int toku_cachetable_get_and_pin(CACHEFILE, CACHEKEY, u_int32_t /*fullhash*/,
int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, u_int32_t /*fullhash*/, void**); int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, u_int32_t /*fullhash*/, void**);
// cachetable object state WRT external memory // cachetable object state WRT external memory
#define CACHETABLE_CLEAN 0 enum cachetable_object_state {
#define CACHETABLE_DIRTY 1 CACHETABLE_CLEAN=0, // the cached object is clean WRT the cachefile
CACHETABLE_DIRTY=1, // the cached object is dirty WRT the cachefile
};
// Unpin a memory object // Unpin a memory object
// Effects: If the memory object is in the cachetable, then OR the dirty flag, // Effects: If the memory object is in the cachetable, then OR the dirty flag,
...@@ -110,6 +118,13 @@ int toku_cachetable_unpin_and_remove (CACHEFILE, CACHEKEY); /* Removing somethin ...@@ -110,6 +118,13 @@ int toku_cachetable_unpin_and_remove (CACHEFILE, CACHEKEY); /* Removing somethin
// Effect: Remove an object from the cachetable. Don't write it back. // Effect: Remove an object from the cachetable. Don't write it back.
// Requires: The object must be pinned exactly once. // Requires: The object must be pinned exactly once.
// Prefetch a memory object for a given key into the cachetable
// Returns: 0 if success
int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
CACHETABLE_FLUSH_CALLBACK flush_callback,
CACHETABLE_FETCH_CALLBACK fetch_callback,
void *extraargs);
int toku_cachetable_assert_all_unpinned (CACHETABLE); int toku_cachetable_assert_all_unpinned (CACHETABLE);
int toku_cachefile_count_pinned (CACHEFILE, int /*printthem*/ ); int toku_cachefile_count_pinned (CACHEFILE, int /*printthem*/ );
......
...@@ -34,8 +34,6 @@ REGRESSION_TESTS_RAW = \ ...@@ -34,8 +34,6 @@ REGRESSION_TESTS_RAW = \
brt-test4 \ brt-test4 \
brt-test5 \ brt-test5 \
cachetable-rwlock-test \ cachetable-rwlock-test \
cachetable-writequeue-test \
threadpool-test \
cachetable-test \ cachetable-test \
cachetable-test2 \ cachetable-test2 \
cachetable-put-test \ cachetable-put-test \
...@@ -74,6 +72,8 @@ REGRESSION_TESTS_RAW = \ ...@@ -74,6 +72,8 @@ REGRESSION_TESTS_RAW = \
test-leafentry \ test-leafentry \
test_oexcl \ test_oexcl \
test_toku_malloc_plain_free \ test_toku_malloc_plain_free \
threadpool-test \
workqueue-test \
x1764-test \ x1764-test \
ybt-test \ ybt-test \
# This line intentionally kept commented so I can have a \ on the end of the previous line # This line intentionally kept commented so I can have a \ on the end of the previous line
......
...@@ -149,6 +149,7 @@ static int fetch (CACHEFILE f, CACHEKEY key, u_int32_t fullhash __attribute__((_ ...@@ -149,6 +149,7 @@ static int fetch (CACHEFILE f, CACHEKEY key, u_int32_t fullhash __attribute__((_
assert (expect_f==f); assert (expect_f==f);
assert((long)extraargs==23); assert((long)extraargs==23);
*value = make_item(key.b); *value = make_item(key.b);
*sizep = test_object_size;
did_fetch=key; did_fetch=key;
written_lsn->lsn = 0; written_lsn->lsn = 0;
return 0; return 0;
......
#include "toku_portability.h"
#include "toku_os.h"
#include "test.h"
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <unistd.h> #include <unistd.h>
...@@ -9,14 +5,20 @@ ...@@ -9,14 +5,20 @@
#include <string.h> #include <string.h>
#include <errno.h> #include <errno.h>
#include <malloc.h> #include <malloc.h>
#include <toku_pthread.h>
#include "toku_portability.h"
#include "toku_os.h"
#include "toku_pthread.h"
#include "threadpool.h" #include "threadpool.h"
int verbose;
struct my_threadpool { struct my_threadpool {
THREADPOOL threadpool; THREADPOOL threadpool;
toku_pthread_mutex_t mutex; toku_pthread_mutex_t mutex;
toku_pthread_cond_t wait; toku_pthread_cond_t wait;
int closed; int closed;
int counter;
}; };
static void static void
...@@ -27,10 +29,11 @@ my_threadpool_init (struct my_threadpool *my_threadpool, int max_threads) { ...@@ -27,10 +29,11 @@ my_threadpool_init (struct my_threadpool *my_threadpool, int max_threads) {
r = toku_pthread_mutex_init(&my_threadpool->mutex, 0); assert(r == 0); r = toku_pthread_mutex_init(&my_threadpool->mutex, 0); assert(r == 0);
r = toku_pthread_cond_init(&my_threadpool->wait, 0); assert(r == 0); r = toku_pthread_cond_init(&my_threadpool->wait, 0); assert(r == 0);
my_threadpool->closed = 0; my_threadpool->closed = 0;
my_threadpool->counter = 0;
} }
static void static void
my_threadpool_destroy (struct my_threadpool *my_threadpool) { my_threadpool_destroy (struct my_threadpool *my_threadpool, int max_threads) {
int r; int r;
r = toku_pthread_mutex_lock(&my_threadpool->mutex); assert(r == 0); r = toku_pthread_mutex_lock(&my_threadpool->mutex); assert(r == 0);
my_threadpool->closed = 1; my_threadpool->closed = 1;
...@@ -39,30 +42,18 @@ my_threadpool_destroy (struct my_threadpool *my_threadpool) { ...@@ -39,30 +42,18 @@ my_threadpool_destroy (struct my_threadpool *my_threadpool) {
if (verbose) printf("current %d\n", threadpool_get_current_threads(my_threadpool->threadpool)); if (verbose) printf("current %d\n", threadpool_get_current_threads(my_threadpool->threadpool));
threadpool_destroy(&my_threadpool->threadpool); assert(my_threadpool->threadpool == 0); threadpool_destroy(&my_threadpool->threadpool); assert(my_threadpool->threadpool == 0);
assert(my_threadpool->counter == max_threads);
r = toku_pthread_mutex_destroy(&my_threadpool->mutex); assert(r == 0); r = toku_pthread_mutex_destroy(&my_threadpool->mutex); assert(r == 0);
r = toku_pthread_cond_destroy(&my_threadpool->wait); assert(r == 0); r = toku_pthread_cond_destroy(&my_threadpool->wait); assert(r == 0);
} }
static void * static void *
fbusy (void *arg) { my_thread_f (void *arg) {
struct my_threadpool *my_threadpool = arg;
int r;
r = toku_pthread_mutex_lock(&my_threadpool->mutex); assert(r == 0);
while (!my_threadpool->closed) {
r = toku_pthread_cond_wait(&my_threadpool->wait, &my_threadpool->mutex); assert(r == 0);
}
r = toku_pthread_mutex_unlock(&my_threadpool->mutex); assert(r == 0);
if (verbose) printf("%lu:%s:exit\n", (unsigned long)toku_os_gettid(), __FUNCTION__);
return arg;
}
static void *
fidle (void *arg) {
struct my_threadpool *my_threadpool = arg; struct my_threadpool *my_threadpool = arg;
int r; int r;
r = toku_pthread_mutex_lock(&my_threadpool->mutex); assert(r == 0); r = toku_pthread_mutex_lock(&my_threadpool->mutex); assert(r == 0);
my_threadpool->counter++;
while (!my_threadpool->closed) { while (!my_threadpool->closed) {
r = toku_pthread_cond_wait(&my_threadpool->wait, &my_threadpool->mutex); assert(r == 0); r = toku_pthread_cond_wait(&my_threadpool->wait, &my_threadpool->mutex); assert(r == 0);
} }
...@@ -92,7 +83,7 @@ usage (void) { ...@@ -92,7 +83,7 @@ usage (void) {
} }
int int
test_main(int argc, const char *argv[]) { main(int argc, const char *argv[]) {
int max_threads = 1; int max_threads = 1;
#if defined(__linux__) #if defined(__linux__)
int do_malloc_fail = 0; int do_malloc_fail = 0;
...@@ -121,28 +112,15 @@ test_main(int argc, const char *argv[]) { ...@@ -121,28 +112,15 @@ test_main(int argc, const char *argv[]) {
struct my_threadpool my_threadpool; struct my_threadpool my_threadpool;
THREADPOOL threadpool; THREADPOOL threadpool;
// test threadpool busy causes no threads to be created
my_threadpool_init(&my_threadpool, max_threads); my_threadpool_init(&my_threadpool, max_threads);
threadpool = my_threadpool.threadpool; threadpool = my_threadpool.threadpool;
if (verbose) printf("test threadpool_set_busy\n"); if (verbose) printf("test threadpool_set_busy\n");
for (i=0; i<2*max_threads; i++) { for (i=0; i<2*max_threads; i++) {
threadpool_maybe_add(threadpool, fbusy, &my_threadpool); assert(threadpool_get_current_threads(threadpool) == (i >= max_threads ? max_threads : i));
assert(threadpool_get_current_threads(threadpool) == 1); threadpool_maybe_add(threadpool, my_thread_f, &my_threadpool);
}
assert(threadpool_get_current_threads(threadpool) == 1);
my_threadpool_destroy(&my_threadpool);
// test threadpool idle causes up to max_threads to be created
my_threadpool_init(&my_threadpool, max_threads);
threadpool = my_threadpool.threadpool;
if (verbose) printf("test threadpool_set_idle\n");
for (i=0; i<2*max_threads; i++) {
threadpool_maybe_add(threadpool, fidle, &my_threadpool);
sleep(1);
assert(threadpool_get_current_threads(threadpool) <= max_threads);
} }
assert(threadpool_get_current_threads(threadpool) == max_threads); assert(threadpool_get_current_threads(threadpool) == max_threads);
my_threadpool_destroy(&my_threadpool); my_threadpool_destroy(&my_threadpool, max_threads);
#if DO_MALLOC_HOOK #if DO_MALLOC_HOOK
if (do_malloc_fail) { if (do_malloc_fail) {
......
#include "includes.h" #include <stdio.h>
#include <errno.h>
#include <string.h>
#include "toku_portability.h"
#include "toku_assert.h"
#include "toku_pthread.h"
#include "memory.h"
#include "workqueue.h"
#include "threadpool.h"
#include "test.h"
int verbose; int verbose;
typedef struct ctpair *PAIR; static WORKITEM
struct ctpair { new_workitem (void) {
PAIR next_wq; WORKITEM wi = (WORKITEM) toku_malloc(sizeof *wi); assert(wi);
}; return wi;
static PAIR
new_pair (void) {
PAIR p = (PAIR) toku_malloc(sizeof *p); assert(p);
return p;
} }
static void static void
destroy_pair(PAIR p) { destroy_workitem(WORKITEM wi) {
toku_free(p); toku_free(wi);
} }
#include "cachetable-writequeue.h"
// test simple create and destroy // test simple create and destroy
static void static void
test_create_destroy (void) { test_create_destroy (void) {
struct writequeue writequeue, *wq = &writequeue; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
writequeue_init(wq); struct workqueue workqueue, *wq = &workqueue;
assert(writequeue_empty(wq)); workqueue_init(wq);
writequeue_destroy(wq); assert(workqueue_empty(wq));
workqueue_destroy(wq);
} }
// verify that the wq implements FIFO ordering // verify that the wq implements FIFO ordering
static void static void
test_simple_enq_deq (int n) { test_simple_enq_deq (int n) {
struct writequeue writequeue, *wq = &writequeue; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
struct workqueue workqueue, *wq = &workqueue;
int r; int r;
toku_pthread_mutex_t mutex;
r = toku_pthread_mutex_init(&mutex, 0); assert(r == 0); workqueue_init(wq);
writequeue_init(wq); assert(workqueue_empty(wq));
assert(writequeue_empty(wq)); WORKITEM work[n];
PAIR pairs[n];
int i; int i;
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
pairs[i] = new_pair(); work[i] = new_workitem();
writequeue_enq(wq, pairs[i]); workqueue_enq(wq, work[i], 1);
assert(!writequeue_empty(wq)); assert(!workqueue_empty(wq));
} }
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
PAIR p = 0; WORKITEM wi = 0;
r = writequeue_deq(wq, &mutex, &p); r = workqueue_deq(wq, &wi, 1);
assert(r == 0 && p == pairs[i]); assert(r == 0 && wi == work[i]);
destroy_pair(p); destroy_workitem(wi);
} }
assert(writequeue_empty(wq)); assert(workqueue_empty(wq));
writequeue_destroy(wq); workqueue_destroy(wq);
r = toku_pthread_mutex_destroy(&mutex); assert(r == 0);
} }
// setting the wq closed should cause deq to return EINVAL // setting the wq closed should cause deq to return EINVAL
static void static void
test_set_closed (void) { test_set_closed (void) {
struct writequeue writequeue, *wq = &writequeue; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
writequeue_init(wq); struct workqueue workqueue, *wq = &workqueue;
writequeue_set_closed(wq); workqueue_init(wq);
int r = writequeue_deq(wq, 0, 0); WORKITEM wi = 0;
assert(r == EINVAL); workqueue_set_closed(wq, 1);
writequeue_destroy(wq); int r = workqueue_deq(wq, &wi, 1);
assert(r == EINVAL && wi == 0);
workqueue_destroy(wq);
} }
// closing a wq with a blocked reader thread should cause the reader to get EINVAL // closing a wq with a blocked reader thread should cause the reader to get EINVAL
struct writequeue_with_mutex {
struct writequeue writequeue;
toku_pthread_mutex_t mutex;
};
static void
writequeue_with_mutex_init (struct writequeue_with_mutex *wqm) {
writequeue_init(&wqm->writequeue);
int r = toku_pthread_mutex_init(&wqm->mutex, 0); assert(r == 0);
}
static void
writequeue_with_mutex_destroy (struct writequeue_with_mutex *wqm) {
writequeue_destroy(&wqm->writequeue);
int r = toku_pthread_mutex_destroy(&wqm->mutex); assert(r == 0);
}
static void * static void *
test_set_closed_waiter(void *arg) { test_set_closed_waiter(void *arg) {
struct writequeue_with_mutex *wqm = arg; struct workqueue *wq = arg;
int r; int r;
r = toku_pthread_mutex_lock(&wqm->mutex); assert(r == 0); WORKITEM wi = 0;
PAIR p; r = workqueue_deq(wq, &wi, 1);
r = writequeue_deq(&wqm->writequeue, &wqm->mutex, &p); assert(r == EINVAL && wi == 0);
assert(r == EINVAL);
r = toku_pthread_mutex_unlock(&wqm->mutex); assert(r == 0);
return arg; return arg;
} }
static void static void
test_set_closed_thread (void) { test_set_closed_thread (void) {
struct writequeue_with_mutex writequeue_with_mutex, *wqm = &writequeue_with_mutex; if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
struct workqueue workqueue, *wq = &workqueue;
int r; int r;
writequeue_with_mutex_init(wqm); workqueue_init(wq);
toku_pthread_t tid; toku_pthread_t tid;
r = toku_pthread_create(&tid, 0, test_set_closed_waiter, wqm); assert(r == 0); r = toku_pthread_create(&tid, 0, test_set_closed_waiter, wq); assert(r == 0);
sleep(1); sleep(1);
r = toku_pthread_mutex_lock(&wqm->mutex); assert(r == 0); workqueue_set_closed(wq, 1);
writequeue_set_closed(&wqm->writequeue);
r = toku_pthread_mutex_unlock(&wqm->mutex); assert(r == 0);
void *ret; void *ret;
r = toku_pthread_join(tid, &ret); r = toku_pthread_join(tid, &ret);
assert(r == 0 && ret == wqm); assert(r == 0 && ret == wq);
writequeue_with_mutex_destroy(wqm); workqueue_destroy(wq);
} }
// verify writer reader flow control // verify writer reader flow control
...@@ -130,82 +111,92 @@ test_set_closed_thread (void) { ...@@ -130,82 +111,92 @@ test_set_closed_thread (void) {
// writers when the wq size <= 1/2 of the wq limit // writers when the wq size <= 1/2 of the wq limit
struct rwfc { struct rwfc {
toku_pthread_mutex_t mutex; struct workqueue workqueue;
struct writequeue writequeue;
int current, limit; int current, limit;
}; };
static void rwfc_init (struct rwfc *rwfc, int limit) { static void rwfc_init (struct rwfc *rwfc, int limit) {
int r; workqueue_init(&rwfc->workqueue);
r = toku_pthread_mutex_init(&rwfc->mutex, 0); assert(r == 0);
writequeue_init(&rwfc->writequeue);
rwfc->current = 0; rwfc->limit = limit; rwfc->current = 0; rwfc->limit = limit;
} }
static void static void
rwfc_destroy (struct rwfc *rwfc) { rwfc_destroy (struct rwfc *rwfc) {
int r; workqueue_destroy(&rwfc->workqueue);
writequeue_destroy(&rwfc->writequeue); }
r = toku_pthread_mutex_destroy(&rwfc->mutex); assert(r == 0);
static void
rwfc_do_read (WORKITEM wi) {
struct rwfc *rwfc = (struct rwfc *) workitem_arg(wi);
workqueue_lock(&rwfc->workqueue);
if (2*rwfc->current-- > rwfc->limit && 2*rwfc->current <= rwfc->limit) {
workqueue_wakeup_write(&rwfc->workqueue, 0);
}
workqueue_unlock(&rwfc->workqueue);
destroy_workitem(wi);
} }
static void * static void *
rwfc_reader (void *arg) { rwfc_worker (void *arg) {
struct rwfc *rwfc = arg; struct workqueue *wq = arg;
int r;
while (1) { while (1) {
PAIR ctpair; WORKITEM wi = 0;
r = toku_pthread_mutex_lock(&rwfc->mutex); assert(r == 0); int r = workqueue_deq(wq, &wi, 1);
r = writequeue_deq(&rwfc->writequeue, &rwfc->mutex, &ctpair);
if (r == EINVAL) { if (r == EINVAL) {
r = toku_pthread_mutex_unlock(&rwfc->mutex); assert(r == 0); assert(wi == 0);
break; break;
} }
if (2*rwfc->current-- > rwfc->limit && 2*rwfc->current <= rwfc->limit) {
writequeue_wakeup_write(&rwfc->writequeue);
}
r = toku_pthread_mutex_unlock(&rwfc->mutex); assert(r == 0);
destroy_pair(ctpair);
usleep(random() % 100); usleep(random() % 100);
wi->f(wi);
} }
return arg; return arg;
} }
static void static void
test_flow_control (int limit, int n) { test_flow_control (int limit, int n, int maxthreads) {
if (verbose) printf("%s:%d\n", __FUNCTION__, __LINE__);
struct rwfc my_rwfc, *rwfc = &my_rwfc; struct rwfc my_rwfc, *rwfc = &my_rwfc;
int r; THREADPOOL tp;
int i;
rwfc_init(rwfc, limit); rwfc_init(rwfc, limit);
toku_pthread_t tid; threadpool_create(&tp, maxthreads);
r = toku_pthread_create(&tid, 0, rwfc_reader, rwfc); assert(r == 0); for (i=0; i<maxthreads; i++)
threadpool_maybe_add(tp, rwfc_worker, &rwfc->workqueue);
sleep(1); // this is here to block the reader on the first deq sleep(1); // this is here to block the reader on the first deq
int i;
for (i=0; i<n; i++) { for (i=0; i<n; i++) {
PAIR ctpair = new_pair(); WORKITEM wi = new_workitem();
r = toku_pthread_mutex_lock(&rwfc->mutex); assert(r == 0); workitem_init(wi, rwfc_do_read, rwfc);
writequeue_enq(&rwfc->writequeue, ctpair); workqueue_lock(&rwfc->workqueue);
workqueue_enq(&rwfc->workqueue, wi, 0);
rwfc->current++; rwfc->current++;
while (rwfc->current >= rwfc->limit) { while (rwfc->current >= rwfc->limit) {
// printf("%d - %d %d\n", i, rwfc->current, rwfc->limit); // printf("%d - %d %d\n", i, rwfc->current, rwfc->limit);
writequeue_wait_write(&rwfc->writequeue, &rwfc->mutex); workqueue_wait_write(&rwfc->workqueue, 0);
} }
r = toku_pthread_mutex_unlock(&rwfc->mutex); assert(r == 0); workqueue_unlock(&rwfc->workqueue);
// toku_os_usleep(random() % 1); // toku_os_usleep(random() % 1);
} }
writequeue_set_closed(&rwfc->writequeue); workqueue_set_closed(&rwfc->workqueue, 1);
void *ret; threadpool_destroy(&tp);
r = toku_pthread_join(tid, &ret); assert(r == 0);
rwfc_destroy(rwfc); rwfc_destroy(rwfc);
} }
int int
test_main(int argc, const char *argv[]) { main(int argc, const char *argv[]) {
default_parse_args(argc, argv); int i;
for (i=1; i<argc; i++) {
const char *arg = argv[i];
if (strcmp(arg, "-v") == 0)
verbose++;
}
test_create_destroy(); test_create_destroy();
test_simple_enq_deq(0); test_simple_enq_deq(0);
test_simple_enq_deq(42); test_simple_enq_deq(42);
test_set_closed(); test_set_closed();
test_set_closed_thread(); test_set_closed_thread();
test_flow_control(8, 10000); test_flow_control(8, 10000, 1);
test_flow_control(8, 10000, 2);
test_flow_control(8, 10000, 17);
return 0; return 0;
} }
#include "includes.h" /* -*- mode: C; c-basic-offset: 4 -*- */
#include <stdio.h>
#include <errno.h>
#include "toku_portability.h"
#include "toku_pthread.h"
#include "toku_assert.h"
#include "memory.h"
#include "threadpool.h"
struct threadpool { struct threadpool {
int max_threads; int max_threads;
int current_threads; int current_threads;
toku_pthread_t pids[]; toku_pthread_t tids[];
}; };
int threadpool_create(THREADPOOL *threadpoolptr, int max_threads) { int threadpool_create(THREADPOOL *threadpoolptr, int max_threads) {
...@@ -15,7 +23,7 @@ int threadpool_create(THREADPOOL *threadpoolptr, int max_threads) { ...@@ -15,7 +23,7 @@ int threadpool_create(THREADPOOL *threadpoolptr, int max_threads) {
threadpool->current_threads = 0; threadpool->current_threads = 0;
int i; int i;
for (i=0; i<max_threads; i++) for (i=0; i<max_threads; i++)
threadpool->pids[i] = 0; threadpool->tids[i] = 0;
*threadpoolptr = threadpool; *threadpoolptr = threadpool;
return 0; return 0;
} }
...@@ -25,7 +33,7 @@ void threadpool_destroy(THREADPOOL *threadpoolptr) { ...@@ -25,7 +33,7 @@ void threadpool_destroy(THREADPOOL *threadpoolptr) {
int i; int i;
for (i=0; i<threadpool->current_threads; i++) { for (i=0; i<threadpool->current_threads; i++) {
int r; void *ret; int r; void *ret;
r = toku_pthread_join(threadpool->pids[i], &ret); r = toku_pthread_join(threadpool->tids[i], &ret);
assert(r == 0); assert(r == 0);
} }
*threadpoolptr = 0; *threadpoolptr = 0;
...@@ -34,7 +42,7 @@ void threadpool_destroy(THREADPOOL *threadpoolptr) { ...@@ -34,7 +42,7 @@ void threadpool_destroy(THREADPOOL *threadpoolptr) {
void threadpool_maybe_add(THREADPOOL threadpool, void *(*f)(void *), void *arg) { void threadpool_maybe_add(THREADPOOL threadpool, void *(*f)(void *), void *arg) {
if (threadpool->current_threads < threadpool->max_threads) { if (threadpool->current_threads < threadpool->max_threads) {
int r = toku_pthread_create(&threadpool->pids[threadpool->current_threads], 0, f, arg); int r = toku_pthread_create(&threadpool->tids[threadpool->current_threads], 0, f, arg);
if (r == 0) { if (r == 0) {
threadpool->current_threads++; threadpool->current_threads++;
} }
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef THREADPOOL_H #ifndef THREADPOOL_H
#define THREADPOOL_H #define THREADPOOL_H
......
/* -*- mode: C; c-basic-offset: 4 -*- */
#include <stdio.h>
#include <errno.h>
#include "toku_portability.h"
#include "toku_assert.h"
#include "toku_os.h"
#include "toku_pthread.h"
#include "workqueue.h"
#include "threadpool.h"
#include "toku_worker.h"
// Create the shared work queue and a fixed pool of worker threads -- one per
// active processor -- all waiting on that single queue of work items.
void toku_init_workers(WORKQUEUE wq, THREADPOOL *tpptr) {
    workqueue_init(wq);
    int nworkers = toku_os_get_number_active_processors();
    threadpool_create(tpptr, nworkers);
    int i;
    for (i = 0; i < nworkers; i++)
        threadpool_maybe_add(*tpptr, toku_worker, wq);
}
// Shut down the worker pool.  The order below is the contract: close the queue
// first so blocked workers are kicked out, join them, then free the queue.
void toku_destroy_workers(WORKQUEUE wq, THREADPOOL *tpptr) {
    workqueue_set_closed(wq, 1); // close the work queue; blocked workers get EINVAL [see "A" in toku_worker()]
    threadpool_destroy(tpptr);   // join all of the worker threads
    workqueue_destroy(wq);       // safe now: no thread can still touch the queue
}
void *toku_worker(void *arg) {
// printf("%lu:%s:start %p\n", toku_pthread_self(), __FUNCTION__, arg);
WORKQUEUE wq = arg;
int r;
while (1) {
WORKITEM wi = 0;
r = workqueue_deq(wq, &wi, 1); // get work from the queue, block if empty
if (r != 0) // shut down worker threads when work queue is closed
break; // [see "A" in toku_destroy_workers() ]
wi->f(wi); // call the work handler function
}
// printf("%lu:%s:exit %p\n", toku_pthread_self(), __FUNCTION__, arg);
return arg;
}
/* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef _TOKU_WORKER_H
#define _TOKU_WORKER_H
// Initialize the work queue and spawn the worker threads -- one per active
// processor -- all feeding from the single queue.
void toku_init_workers(WORKQUEUE wq, THREADPOOL *tpptr);
// Close the work queue, join the worker threads, and destroy both.
void toku_destroy_workers(WORKQUEUE wq, THREADPOOL *tpptr);
// Thread function for the worker threads in the worker thread pool.  The arg
// is a pointer to the work queue (WORKQUEUE) that feeds work to the workers.
void *toku_worker(void *arg);
#endif
/* -*- mode: C; c-basic-offset: 4 -*- */
#ifndef _TOKU_WORKQUEUE_H
#define _TOKU_WORKQUEUE_H
#include <errno.h>
#include "toku_assert.h"
#include "toku_pthread.h"
struct workitem;
// A work function is called by a worker thread when the workitem (see below) is
// handed to it by the work queue.  The workitem itself is the only argument;
// any user data travels in its arg field.
typedef void (*WORKFUNC)(struct workitem *wi);
// A workitem bundles the function that a threadpool worker will call with its
// argument, plus the intrusive link used to queue it in a workqueue.
typedef struct workitem *WORKITEM;
struct workitem {
    WORKFUNC f;            // handler invoked by the worker thread
    void *arg;             // user data, retrieved with workitem_arg()
    struct workitem *next; // next item in the workqueue's singly linked list
};
// Prepare a workitem for queueing: record the handler and its argument and
// clear the queue link.
static inline void workitem_init(WORKITEM wi, WORKFUNC f, void *arg) {
    wi->next = 0;
    wi->arg = arg;
    wi->f = f;
}
// Accessor: the handler function stored by workitem_init.
static inline WORKFUNC workitem_func(WORKITEM wi) {
    return wi->f;
}
// Accessor: the user-data pointer stored by workitem_init.
static inline void *workitem_arg(WORKITEM wi) {
    return wi->arg;
}
// A workqueue is currently a fifo of workitems that feeds a thread pool.  We may
// divide the workqueue into per worker thread queues.
// Unlike the older cachetable writequeue, the workqueue owns its mutex; each
// operation takes a dolock flag so callers can either let the operation lock it
// or manage the lock themselves via workqueue_lock()/workqueue_unlock().
typedef struct workqueue *WORKQUEUE;
struct workqueue {
    WORKITEM head, tail;            // singly linked list of workitems (linked via next)
    toku_pthread_mutex_t lock;      // protects every field in this struct
    toku_pthread_cond_t wait_read;  // consumers block here while the queue is empty
    int want_read;                  // number of threads blocked in workqueue_deq
    toku_pthread_cond_t wait_write; // producers block here for flow control
    int want_write;                 // number of threads blocked in workqueue_wait_write
    char closed;                    // when set, kicks waiting threads off of the queue
};
// Get a pointer to the workqueue's internal mutex.  Used by workqueue clients
// that want to control the locking themselves (e.g. to pass it to a condition wait).
static inline toku_pthread_mutex_t *workqueue_lock_ref(WORKQUEUE wq) {
    return &wq->lock;
}
// Acquire the workqueue's internal mutex.
static inline void workqueue_lock(WORKQUEUE wq) {
    int r = toku_pthread_mutex_lock(&wq->lock);
    assert(r == 0);
}
// Release the workqueue's internal mutex.
static inline void workqueue_unlock(WORKQUEUE wq) {
    int r = toku_pthread_mutex_unlock(&wq->lock);
    assert(r == 0);
}
// Set up an empty workqueue.
// Expects: *wq is uninitialized storage.
// Effects: the mutex and both condition variables are created; the list and
// counters are zeroed and the queue is open.
__attribute__((unused))
static void workqueue_init(WORKQUEUE wq) {
    int r = toku_pthread_mutex_init(&wq->lock, 0);
    assert(r == 0);
    r = toku_pthread_cond_init(&wq->wait_read, 0);
    assert(r == 0);
    r = toku_pthread_cond_init(&wq->wait_write, 0);
    assert(r == 0);
    wq->head = 0;
    wq->tail = 0;
    wq->want_read = 0;
    wq->want_write = 0;
    wq->closed = 0;
}
// Tear down a work queue.
// Expects: the work queue is initialized and empty.
__attribute__((unused))
static void workqueue_destroy(WORKQUEUE wq) {
    // Take and release the lock around the emptiness check so race detectors
    // (helgrind) see it as synchronized with the last producer/consumer.
    workqueue_lock(wq);
    assert(wq->head == 0);
    assert(wq->tail == 0);
    workqueue_unlock(wq);
    int r = toku_pthread_cond_destroy(&wq->wait_read);
    assert(r == 0);
    r = toku_pthread_cond_destroy(&wq->wait_write);
    assert(r == 0);
    r = toku_pthread_mutex_destroy(&wq->lock);
    assert(r == 0);
}
// Close the work queue.
// Effects: every thread blocked on either condition variable is woken so it
// can observe the closed flag and back out.
// Dolock controls whether this function takes the queue lock itself; when
// dolock is 0 the caller must already hold it.
__attribute__((unused))
static void workqueue_set_closed(WORKQUEUE wq, int dolock) {
    if (dolock)
        workqueue_lock(wq);
    wq->closed = 1;
    if (dolock)
        workqueue_unlock(wq);
    int r = toku_pthread_cond_broadcast(&wq->wait_read);
    assert(r == 0);
    r = toku_pthread_cond_broadcast(&wq->wait_write);
    assert(r == 0);
}
// Test whether the work queue holds no items.
// Returns: 1 when empty, 0 otherwise.
static inline int workqueue_empty(WORKQUEUE wq) {
    return !wq->head;
}
// Append a work item to the tail of the work queue and signal one blocked
// reader, if any.
// Dolock controls whether this function takes the queue lock itself; when
// dolock is 0 the caller must already hold it.
__attribute__((unused))
static void workqueue_enq(WORKQUEUE wq, WORKITEM wi, int dolock) {
    if (dolock)
        workqueue_lock(wq);
    wi->next = 0;
    if (wq->tail == 0) {
        wq->head = wi;          // queue was empty
    } else {
        wq->tail->next = wi;
    }
    wq->tail = wi;
    if (wq->want_read) {
        int r = toku_pthread_cond_signal(&wq->wait_read);
        assert(r == 0);
    }
    if (dolock)
        workqueue_unlock(wq);
}
// Remove the work item at the head of the work queue.
// Effects: blocks (on the queue's own mutex) while the queue is empty and not closed.
// Dolock controls whether this function takes the queue lock itself; when
// dolock is 0 the caller must already hold it (it is still released during the wait).
// Returns: 0 and sets *wiptr on success; EINVAL if the queue is closed and drained.
__attribute__((unused))
static int workqueue_deq(WORKQUEUE wq, WORKITEM *wiptr, int dolock) {
    if (dolock)
        workqueue_lock(wq);
    while (workqueue_empty(wq)) {
        if (wq->closed) {
            if (dolock)
                workqueue_unlock(wq);
            return EINVAL;
        }
        wq->want_read++;
        int r = toku_pthread_cond_wait(&wq->wait_read, &wq->lock);
        assert(r == 0);
        wq->want_read--;
    }
    WORKITEM first = wq->head;
    wq->head = first->next;
    if (wq->head == 0)
        wq->tail = 0;
    first->next = 0;
    if (dolock)
        workqueue_unlock(wq);
    *wiptr = first;
    return 0;
}
// Block the calling producer thread until a consumer wakes it (flow control).
// Dolock controls whether this function takes the queue lock itself; when
// dolock is 0 the caller must already hold it (it is released during the wait).
__attribute__((unused))
static void workqueue_wait_write(WORKQUEUE wq, int dolock) {
    if (dolock)
        workqueue_lock(wq);
    wq->want_write++;
    int r = toku_pthread_cond_wait(&wq->wait_write, &wq->lock);
    assert(r == 0);
    wq->want_write--;
    if (dolock)
        workqueue_unlock(wq);
}
// Wake every producer thread blocked in workqueue_wait_write.
// Dolock controls whether this function takes the queue lock itself; when
// dolock is 0 the caller must already hold it.
// Fix: the previous version read wq->want_write before taking the lock as a
// fast-path check.  That unsynchronized read races with producers updating
// want_write under the lock (undefined behavior per POSIX, and flagged by race
// detectors).  Take the lock first; the broadcast is already conditional, so
// the only cost is an uncontended lock/unlock when nobody is waiting.
__attribute__((unused))
static void workqueue_wakeup_write(WORKQUEUE wq, int dolock) {
    if (dolock)
        workqueue_lock(wq);
    if (wq->want_write) {
        int r = toku_pthread_cond_broadcast(&wq->wait_write);
        assert(r == 0);
    }
    if (dolock)
        workqueue_unlock(wq);
}
#endif
...@@ -21,7 +21,7 @@ void toku_do_assert(int,const char*/*expr_as_string*/,const char */*fun*/,const ...@@ -21,7 +21,7 @@ void toku_do_assert(int,const char*/*expr_as_string*/,const char */*fun*/,const
#define WHEN_NOT_GCOV(x) #define WHEN_NOT_GCOV(x)
#endif #endif
#undef assert
#ifdef SLOW_ASSERT #ifdef SLOW_ASSERT
#define assert(expr) toku_do_assert((expr) != 0, #expr, __FUNCTION__, __FILE__, __LINE__) #define assert(expr) toku_do_assert((expr) != 0, #expr, __FUNCTION__, __FILE__, __LINE__)
#else #else
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment