Commit 350c0489 authored by Zardosht Kasheff's avatar Zardosht Kasheff Committed by Yoni Fogel

[t:4314], protect calling of cleaner_callback with fdlock

git-svn-id: file:///svn/toku/tokudb@38207 c7de825b-a66e-492c-adef-691d508d4ae1
parent eee52c17
......@@ -3778,6 +3778,9 @@ toku_cleaner_thread (void *cachetable_v)
cachetable_unlock(ct);
break;
}
// here we select a PAIR for cleaning
// look at some number of PAIRS, and
// pick what we think is the best one for cleaning
do {
if (nb_mutex_users(&ct->cleaner_head->nb_mutex) > 0 || ct->cleaner_head->cachefile->is_flushing) {
goto next_pair;
......@@ -3791,6 +3794,10 @@ toku_cleaner_thread (void *cachetable_v)
next_pair:
ct->cleaner_head = ct->cleaner_head->clock_next;
} while (ct->cleaner_head != first_pair && n_seen < CLEANER_N_TO_CHECK);
//
// at this point, if we have found a PAIR for cleaning,
// that is, best_pair != NULL, we do the clean
//
if (best_pair) {
nb_mutex_write_lock(&best_pair->nb_mutex, ct->mutex);
// the order of operations for these two pieces is important
......@@ -3807,38 +3814,56 @@ toku_cleaner_thread (void *cachetable_v)
if (best_pair->checkpoint_pending) {
write_locked_pair_for_checkpoint(ct, best_pair);
}
}
cachetable_unlock(ct);
if (best_pair) {
CACHEFILE cf = best_pair->cachefile;
BOOL cleaner_callback_called = FALSE;
// grab the fdlock, because the cleaner callback
// will access the fd.
rwlock_prefer_read_lock(&cf->fdlock, ct->mutex);
// it's theoretically possible that after writing a PAIR for checkpoint, the
// PAIR's heuristic tells us nothing needs to be done. It is not possible
// in Dr. Noga, but unit tests verify this behavior works properly.
CACHEFILE cf = best_pair->cachefile;
if (cleaner_thread_rate_pair(best_pair) > 0) {
//
// also, because the cleaner thread needs to act as a client
// and honor the same invariants that client threads honor,
// we refuse to call the cleaner callback if the cachefile
// has been redirected to /dev/null, because client threads
// do not call APIs that access the file if the file has been
// redirected to /dev/null
if (!toku_cachefile_is_dev_null_unlocked(cf) &&
cleaner_thread_rate_pair(best_pair) > 0)
{
cachetable_unlock(ct);
int r = best_pair->cleaner_callback(best_pair->value,
best_pair->key,
best_pair->fullhash,
best_pair->write_extraargs);
assert_zero(r);
// The cleaner callback must have unlocked the pair, so we
// don't need to unlock it here.
}
else {
cleaner_callback_called = TRUE;
cachetable_lock(ct);
}
// The cleaner callback must have unlocked the pair, so we
// don't need to unlock it if the cleaner callback is called.
if (!cleaner_callback_called) {
nb_mutex_write_unlock(&best_pair->nb_mutex);
cachetable_unlock(ct);
}
rwlock_read_unlock(&cf->fdlock);
// We need to make sure the cachefile sticks around so a close
// can't come destroy it. That's the purpose of this
// "add/remove_background_job" business, which means the
// cachefile is still valid here, even though the cleaner
// callback unlocks the pair.
remove_background_job(cf, false);
} else {
remove_background_job(cf, true);
cachetable_unlock(ct);
}
else {
cachetable_unlock(ct);
// If we didn't find anything this time around the cachetable,
// we probably won't find anything if we run around again, so
// just break out now and we'll try again when the cleaner
// thread runs again.
// just break out from the for-loop now and
// we'll try again when the cleaner thread runs again.
break;
}
}
......
#ident "$Id: cachetable-simple-verify.c 36689 2011-11-07 22:08:05Z zardosht $"
#ident "Copyright (c) 2007-2011 Tokutek Inc. All rights reserved."
#include "includes.h"
#include "test.h"
CACHEFILE f1;
// Flush callback for the test pair. Reports the pair back at size 8 with
// cache_pressure_size 8, so the cleaner heuristic would always consider
// this pair worth cleaning.
static void
flush (CACHEFILE f __attribute__((__unused__)),
       int UU(fd),
       CACHEKEY k __attribute__((__unused__)),
       void *v __attribute__((__unused__)),
       void *e __attribute__((__unused__)),
       PAIR_ATTR s __attribute__((__unused__)),
       PAIR_ATTR* new_size __attribute__((__unused__)),
       BOOL w __attribute__((__unused__)),
       BOOL keep __attribute__((__unused__)),
       BOOL c __attribute__((__unused__))
       ) {
    if (verbose) { printf("FLUSH: %d\n", (int)k.b); }
    PAIR_ATTR reported = make_pair_attr(8);
    reported.cache_pressure_size = 8;
    *new_size = reported;
    // NOTE(review): presumably `c` means "for checkpoint" — this test only
    // expects the pair to be written as part of a checkpoint; verify.
    if (w) {
        assert(c);
    }
}
// Fetch callback: hands back a clean, NULL-valued pair of nominal size 8.
static int
fetch (CACHEFILE f __attribute__((__unused__)),
       int UU(fd),
       CACHEKEY k __attribute__((__unused__)),
       u_int32_t fullhash __attribute__((__unused__)),
       void **value __attribute__((__unused__)),
       PAIR_ATTR *sizep __attribute__((__unused__)),
       int *dirtyp,
       void *extraargs __attribute__((__unused__))
       ) {
    *value = NULL;
    *sizep = make_pair_attr(8);
    *dirtyp = 0;
    return 0;
}
// Cleaner callback that must never run: the test redirects the cachefile
// to /dev/null before invoking the cleaner thread, and the cleaner is
// expected to skip pairs whose file has been redirected.
static int
cleaner_callback(
    void* UU(brtnode_pv),
    BLOCKNUM UU(blocknum),
    u_int32_t UU(fullhash),
    void* UU(extraargs)
    )
{
    // Reaching this point means the cleaner ran on a redirected file.
    assert(FALSE);
    return 0;
}
// Regression test for [t:4314]: after a cachefile has been redirected to
// /dev/null, the cleaner thread must NOT invoke the cleaner callback
// (cleaner_callback above aborts the test if it is ever called).
static void
cachetable_test (void) {
    const int test_limit = 12;
    int r;
    CACHETABLE ct;
    r = toku_create_cachetable(&ct, test_limit, ZERO_LSN, NULL_LOGGER); assert(r == 0);
    char fname1[] = __FILE__ "test1.dat";
    unlink(fname1);
    r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
    void* v1;
    long s1;
    // Pin a pair wired to the always-failing cleaner callback.
    r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, flush, fetch, def_pe_est_callback, def_pe_callback, def_pf_req_callback, def_pf_callback, cleaner_callback, NULL, NULL);
    assert_zero(r); // was ignored; every other fallible call here checks r
    // Unpin dirty with nonzero cache pressure so the cleaner heuristic
    // would otherwise select this pair for cleaning.
    PAIR_ATTR attr = make_pair_attr(8);
    attr.cache_pressure_size = 8;
    r = toku_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, attr);
    assert_zero(r); // was ignored
    // test that once we have redirected to /dev/null,
    // cleaner callback is NOT called
    r = toku_cachefile_redirect_nullfd(f1);
    assert_zero(r);
    toku_cleaner_thread(ct);
    toku_cachetable_verify(ct);
    r = toku_cachefile_close(&f1, 0, FALSE, ZERO_LSN); assert(r == 0 && f1 == 0);
    r = toku_cachetable_close(&ct); lazy_assert_zero(r);
}
// Test entry point: parse the standard test arguments (e.g. verbosity)
// and run the single test case.
int
test_main(int argc, const char *argv[]) {
default_parse_args(argc, argv);
cachetable_test();
return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment