Commit 10f6b5c7 authored by Leif Walsh, committed by Yoni Fogel

refs #5418 merge promotion to main

git-svn-id: file:///svn/toku/tokudb@49697 c7de825b-a66e-492c-adef-691d508d4ae1
parent 3571ca4b
@@ -8,6 +8,7 @@ list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE
 ft/ft_loader-test-extractor-2
 ft/ft_loader-test-extractor-3
 ft/upgrade_test_simple
+portability/test-cache-line-boundary-fails
 portability/try-leak-lost
 portability/try-leak-reachable
 portability/try-leak-uninit
...
@@ -7,7 +7,12 @@ if(USE_BDB)
   find_package(BDB REQUIRED)
 endif()
-find_package(Valgrind REQUIRED)
+option(USE_VALGRIND "Build to run safely under valgrind (often slower)." ON)
+if(USE_VALGRIND)
+  find_package(Valgrind REQUIRED)
+endif()
+option(TOKU_DEBUG_PARANOID "Enable paranoid asserts." ON)
 include(CheckIncludeFiles)
...
@@ -100,6 +100,9 @@ endif ()
 ## this hits with optimized builds somewhere in ftleaf_split, we don't
 ## know why but we don't think it's a big deal
+set_cflags_if_supported(
+  -Wno-error=strict-overflow
+)
 set_ldflags_if_supported(
   -Wno-error=strict-overflow
 )
@@ -122,11 +125,6 @@ else ()
   set(CMAKE_SHARED_LINKER_FLAGS "-g -fuse-linker-plugin ${CMAKE_SHARED_LINKER_FLAGS}")
 endif ()
-option(USE_VALGRIND "Do not pass NVALGRIND to the compiler, because valgrind will be run on the generated executables." ON)
-if (NOT USE_VALGRIND)
-  set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_RELEASE NVALGRIND=1)
-endif ()
 ## set warnings
 set_cflags_if_supported(
   -Wextra
...
@@ -7,6 +7,7 @@
 #include "tokudb_common_funcs.h"
 #include <toku_pthread.h>
 #include <toku_assert.h>
+#include <portability/toku_atomic.h>
 #include <db.h>
 #include <errno.h>
 #include <stdlib.h>
@@ -102,7 +103,7 @@ static void test_begin_commit(int _nqueries) {
     r = c->c_get(c, &key, &val, DB_SET);
 #endif
     assert_zero(r);
-    (void) __sync_fetch_and_add(&set_count, 1);
+    (void) toku_sync_fetch_and_add(&set_count, 1);
     r = c->c_close(c); assert_zero(r);
     r = txn->commit(txn, 0); assert_zero(r);
 }
...
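Throughout this commit, raw `__sync_*` GCC builtins are replaced with `toku_sync_*` wrappers from the new `portability/toku_atomic.h` header (its own diff is collapsed below). A minimal sketch of the forwarding style such a wrapper presumably uses, so that instrumentation or porting shims can live in one place; the real header's exact contents are not shown here:

    // Hypothetical sketch: thin wrappers over the GCC __sync builtins.
    // The real toku_atomic.h may add valgrind/drd annotations here.
    template <typename T, typename U>
    static inline T toku_sync_fetch_and_add(T *addr, U diff) {
        return __sync_fetch_and_add(addr, diff);
    }
    template <typename T, typename U>
    static inline T toku_sync_fetch_and_sub(T *addr, U diff) {
        return __sync_fetch_and_sub(addr, diff);
    }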
This diff is collapsed.
@@ -134,6 +134,7 @@ struct ctpair {
     CACHETABLE_PARTIAL_EVICTION_CALLBACK pe_callback;
     CACHETABLE_CLEANER_CALLBACK cleaner_callback;
     CACHETABLE_CLONE_CALLBACK clone_callback;
+    CACHETABLE_CHECKPOINT_COMPLETE_CALLBACK checkpoint_complete_callback;
     void *write_extraargs;
     // access to these fields are protected by disk_nb_mutex
@@ -384,7 +385,7 @@ public:
     uint64_t reserve_memory(double fraction);
     void release_reserved_memory(uint64_t reserved_memory);
     void run_eviction_thread();
-    void do_partial_eviction(PAIR p);
+    void do_partial_eviction(PAIR p, bool pair_mutex_held);
     void evict_pair(PAIR p, bool checkpoint_pending);
     void wait_for_cache_pressure_to_subside();
     void signal_eviction_thread();
...
This diff is collapsed.
@@ -173,12 +173,15 @@ typedef int (*CACHETABLE_CLEANER_CALLBACK)(void *ftnode_pv, BLOCKNUM blocknum, u
 typedef void (*CACHETABLE_CLONE_CALLBACK)(void* value_data, void** cloned_value_data, PAIR_ATTR* new_attr, bool for_checkpoint, void* write_extraargs);
+typedef void (*CACHETABLE_CHECKPOINT_COMPLETE_CALLBACK)(void *value_data);
 typedef struct {
     CACHETABLE_FLUSH_CALLBACK flush_callback;
     CACHETABLE_PARTIAL_EVICTION_EST_CALLBACK pe_est_callback;
     CACHETABLE_PARTIAL_EVICTION_CALLBACK pe_callback;
     CACHETABLE_CLEANER_CALLBACK cleaner_callback;
     CACHETABLE_CLONE_CALLBACK clone_callback;
+    CACHETABLE_CHECKPOINT_COMPLETE_CALLBACK checkpoint_complete_callback;
     void* write_extraargs; // parameter for flush_callback, pe_est_callback, pe_callback, and cleaner_callback
 } CACHETABLE_WRITE_CALLBACK;
@@ -366,14 +369,14 @@ int toku_cachetable_get_and_pin_nonblocking (
     UNLOCKERS unlockers
 );
-int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, void**);
+int toku_cachetable_maybe_get_and_pin (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, pair_lock_type, void**);
 // Effect: Maybe get and pin a memory object.
 // This function is similar to the get_and_pin function except that it
 // will not attempt to fetch a memory object that is not in the cachetable or requires any kind of blocking to get it.
 // Returns: If the item is already in memory, then return 0 and store it in the
 // void**. If the item is not in memory, then return a nonzero error number.
-int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, void**);
+int toku_cachetable_maybe_get_and_pin_clean (CACHEFILE, CACHEKEY, uint32_t /*fullhash*/, pair_lock_type, void**);
 // Effect: Like maybe get and pin, but may pin a clean pair.
 int toku_cachetable_unpin(CACHEFILE, PAIR, enum cachetable_dirty dirty, PAIR_ATTR size);
@@ -556,5 +559,10 @@ int toku_cleaner_thread(void *cleaner_v);
 // The default of 1M is too high for drd tests, so this is a mechanism to set a smaller number.
 void toku_pair_list_set_lock_size(uint32_t num_locks);
+// Used by ft-ops.cc to figure out if it has the write lock on a pair.
+// Pretty hacky and not accurate enough, should be improved at the frwlock
+// layer.
+__attribute__((const,nonnull))
+bool toku_ctpair_is_write_locked(PAIR pair);
 #endif /* CACHETABLE_H */
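The new `pair_lock_type` argument lets callers of the maybe-get-and-pin family say how the pair should be locked if it is resident. A hedged usage sketch based only on the declarations above and the test calls later in this commit (`cf` and `key` come from the caller; `toku_test_cachetable_unpin` is the test-harness unpin used throughout the tests below):

    // Sketch: opportunistically pin a resident pair with a write lock.
    // A nonzero return means the pair is absent, already in use, or
    // pending checkpoint, per the Effect/Returns comments above.
    static int try_touch_pair(CACHEFILE cf, CACHEKEY key) {
        uint32_t fullhash = toku_cachetable_hash(cf, key);
        void *value;
        int r = toku_cachetable_maybe_get_and_pin(cf, key, fullhash,
                                                  PL_WRITE_EXPENSIVE, &value);
        if (r == 0) {
            // ... inspect or mutate *value here ...
            r = toku_test_cachetable_unpin(cf, key, fullhash,
                                           CACHETABLE_CLEAN, make_pair_attr(8));
        }
        return r;
    }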
@@ -49,6 +49,7 @@
 #include "log-internal.h"
 #include "logger.h"
 #include "checkpoint.h"
+#include <portability/toku_atomic.h>
 ///////////////////////////////////////////////////////////////////////////////////
 // Engine status
@@ -173,7 +174,7 @@ checkpoint_safe_checkpoint_unlock(void) {
 void
 toku_multi_operation_client_lock(void) {
     if (locked_mo)
-        (void) __sync_fetch_and_add(&STATUS_VALUE(CP_CLIENT_WAIT_ON_MO), 1);
+        (void) toku_sync_fetch_and_add(&STATUS_VALUE(CP_CLIENT_WAIT_ON_MO), 1);
     toku_pthread_rwlock_rdlock(&multi_operation_lock);
 }
@@ -185,7 +186,7 @@ toku_multi_operation_client_unlock(void) {
 void
 toku_checkpoint_safe_client_lock(void) {
     if (locked_cs)
-        (void) __sync_fetch_and_add(&STATUS_VALUE(CP_CLIENT_WAIT_ON_CS), 1);
+        (void) toku_sync_fetch_and_add(&STATUS_VALUE(CP_CLIENT_WAIT_ON_CS), 1);
     toku_pthread_rwlock_rdlock(&checkpoint_safe_lock);
     toku_multi_operation_client_lock();
 }
@@ -227,9 +228,9 @@ toku_checkpoint(CHECKPOINTER cp, TOKULOGGER logger,
     assert(initialized);
-    (void) __sync_fetch_and_add(&STATUS_VALUE(CP_WAITERS_NOW), 1);
+    (void) toku_sync_fetch_and_add(&STATUS_VALUE(CP_WAITERS_NOW), 1);
     checkpoint_safe_checkpoint_lock();
-    (void) __sync_fetch_and_sub(&STATUS_VALUE(CP_WAITERS_NOW), 1);
+    (void) toku_sync_fetch_and_sub(&STATUS_VALUE(CP_WAITERS_NOW), 1);
     if (STATUS_VALUE(CP_WAITERS_NOW) > STATUS_VALUE(CP_WAITERS_MAX))
         STATUS_VALUE(CP_WAITERS_MAX) = STATUS_VALUE(CP_WAITERS_NOW);  // threadsafe, within checkpoint_safe lock
...
@@ -25,12 +25,23 @@ static void fifo_init(struct fifo *fifo) {
     fifo->memory_used = 0;
 }
+__attribute__((const,nonnull))
 static int fifo_entry_size(struct fifo_entry *entry) {
     return sizeof (struct fifo_entry) + entry->keylen + entry->vallen
         + xids_get_size(&entry->xids_s)
         - sizeof(XIDS_S); //Prevent double counting from fifo_entry+xids_get_size
 }
+__attribute__((const,nonnull))
+size_t toku_ft_msg_memsize_in_fifo(FT_MSG cmd) {
+    // This must stay in sync with fifo_entry_size because that's what we
+    // really trust. But sometimes we only have an in-memory FT_MSG, not
+    // a serialized fifo_entry so we have to fake it.
+    return sizeof (struct fifo_entry) + cmd->u.id.key->size + cmd->u.id.val->size
+        + xids_get_size(cmd->xids)
+        - sizeof(XIDS_S);
+}
 int toku_fifo_create(FIFO *ptr) {
     struct fifo *XMALLOC(fifo);
     if (fifo == 0) return ENOMEM;
@@ -112,6 +123,9 @@ int toku_fifo_iterate_internal_next(FIFO fifo, int off) {
 struct fifo_entry * toku_fifo_iterate_internal_get_entry(FIFO fifo, int off) {
     return (struct fifo_entry *)(fifo->memory + off);
 }
+size_t toku_fifo_internal_entry_memsize(struct fifo_entry *e) {
+    return fifo_entry_size(e);
+}
 void toku_fifo_iterate (FIFO fifo, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,ITEMLEN datalen, enum ft_msg_type type, MSN msn, XIDS xids, bool is_fresh, void*), void *arg) {
     FIFO_ITERATE(fifo,
...
@@ -83,11 +83,15 @@ void toku_fifo_iterate(FIFO, void(*f)(bytevec key,ITEMLEN keylen,bytevec data,IT
     body; \
 } })
+#define FIFO_CURRENT_ENTRY_MEMSIZE toku_fifo_internal_entry_memsize(e)
 // Internal functions for the iterator.
 int toku_fifo_iterate_internal_start(FIFO fifo);
 int toku_fifo_iterate_internal_has_more(FIFO fifo, int off);
 int toku_fifo_iterate_internal_next(FIFO fifo, int off);
 struct fifo_entry * toku_fifo_iterate_internal_get_entry(FIFO fifo, int off);
+size_t toku_fifo_internal_entry_memsize(struct fifo_entry *e) __attribute__((const,nonnull));
+size_t toku_ft_msg_memsize_in_fifo(FT_MSG cmd) __attribute__((const,nonnull));
 DBT *fill_dbt_for_fifo_entry(DBT *dbt, const struct fifo_entry *entry);
 struct fifo_entry *toku_fifo_get_entry(FIFO fifo, int off);
...
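`FIFO_CURRENT_ENTRY_MEMSIZE` exposes the per-entry footprint inside the `FIFO_ITERATE` macro, which (judging from `toku_fifo_iterate` above) binds the current entry's fields to the names passed in and the raw entry to `e`. A hedged sketch of how a caller might total a buffer's in-memory size with it, assuming those bindings:

    // Sketch: sum entry footprints while iterating a message fifo.
    // FIFO_CURRENT_ENTRY_MEMSIZE expands to toku_fifo_internal_entry_memsize(e),
    // which in turn must agree with toku_ft_msg_memsize_in_fifo.
    static size_t fifo_total_memsize(FIFO fifo) {
        size_t total = 0;
        FIFO_ITERATE(fifo, key, keylen, val, vallen, type, msn, xids, is_fresh, {
            (void) key; (void) keylen; (void) val; (void) vallen;
            (void) type; (void) msn; (void) xids; (void) is_fresh;
            total += FIFO_CURRENT_ENTRY_MEMSIZE;
        });
        return total;
    }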
@@ -147,7 +147,7 @@ try_again_for_write_lock:
     if (apply_ancestor_messages && node->height == 0) {
         needs_ancestors_messages = toku_ft_leaf_needs_ancestors_messages(brt->ft, node, ancestors, bounds, &max_msn_in_path);
         if (needs_ancestors_messages && needed_lock_type == PL_READ) {
-            toku_unpin_ftnode_read_only(brt, node);
+            toku_unpin_ftnode_read_only(brt->ft, node);
             needed_lock_type = PL_WRITE_CHEAP;
             goto try_again_for_write_lock;
         }
@@ -296,14 +296,14 @@ toku_pin_ftnode_off_client_thread_batched(
         h, blocknum, fullhash, bfe, lock_type, num_dependent_nodes, dependent_nodes, node_p, true);
 }
-int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, FTNODE *nodep) {
+int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pair_lock_type lock_type, FTNODE *nodep) {
     void *node_v;
-    int r = toku_cachetable_maybe_get_and_pin_clean(ft->cf, blocknum, fullhash, &node_v);
+    int r = toku_cachetable_maybe_get_and_pin_clean(ft->cf, blocknum, fullhash, lock_type, &node_v);
     if (r != 0) {
         goto cleanup;
     }
     CAST_FROM_VOIDP(*nodep, node_v);
-    if ((*nodep)->height > 0) {
+    if ((*nodep)->height > 0 && lock_type != PL_READ) {
         toku_move_ftnode_messages_to_stale(ft, *nodep);
     }
 cleanup:
@@ -331,14 +331,13 @@ toku_unpin_ftnode(FT ft, FTNODE node)
 }
 void
-toku_unpin_ftnode_read_only(FT_HANDLE brt, FTNODE node)
+toku_unpin_ftnode_read_only(FT ft, FTNODE node)
 {
     int r = toku_cachetable_unpin(
-        brt->ft->cf,
+        ft->cf,
         node->ct_pair,
         (enum cachetable_dirty) node->dirty,
         make_invalid_pair_attr()
     );
     assert(r==0);
 }
@@ -108,7 +108,7 @@ toku_pin_ftnode_off_client_thread_and_maybe_move_messages(
  * This function may return a pinned ftnode to the caller, if pinning is cheap.
  * If the node is already locked, or is pending a checkpoint, the node is not pinned and -1 is returned.
  */
-int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, FTNODE *nodep);
+int toku_maybe_pin_ftnode_clean(FT ft, BLOCKNUM blocknum, uint32_t fullhash, pair_lock_type lock_type, FTNODE *nodep);
 /**
  * Batched version of toku_pin_ftnode_off_client_thread, see cachetable
@@ -158,6 +158,6 @@ void
 toku_unpin_ftnode(FT h, FTNODE node);
 void
-toku_unpin_ftnode_read_only(FT_HANDLE brt, FTNODE node);
+toku_unpin_ftnode_read_only(FT ft, FTNODE node);
 #endif
This diff is collapsed.
@@ -89,6 +89,7 @@ ftleaf_split(
     FTNODE *nodeb,
     DBT *splitk,
     bool create_new_node,
+    enum split_mode split_mode,
     uint32_t num_dependent_nodes,
     FTNODE* dependent_nodes
 );
...
@@ -9,6 +9,7 @@
 #include <ft-cachetable-wrappers.h>
 #include <ft-internal.h>
 #include <ft.h>
+#include <portability/toku_atomic.h>
 // Member Description:
 // 1. highest_pivot_key - this is the key that corresponds to the
@@ -251,7 +252,7 @@ toku_ft_hot_optimize(FT_HANDLE brt,
     uint64_t loop_count = 0;
     MSN msn_at_start_of_hot = ZERO_MSN;  // capture msn from root at
                                          // start of HOT operation
-    (void) __sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_STARTED), 1);
+    (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_STARTED), 1);
     {
         toku_ft_note_hot_begin(brt);
@@ -353,9 +354,9 @@ toku_ft_hot_optimize(FT_HANDLE brt,
     }
     if (success) {
-        (void) __sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_COMPLETED), 1);
+        (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_COMPLETED), 1);
     } else {
-        (void) __sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_ABORTED), 1);
+        (void) toku_sync_fetch_and_add(&STATUS_VALUE(FT_HOT_NUM_ABORTED), 1);
     }
 }
 return r;
...
This diff is collapsed.
This diff is collapsed.
@@ -134,8 +134,8 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
 {
     int r;
     FT ft = NULL;
-    invariant(version >= FT_LAYOUT_MIN_SUPPORTED_VERSION);
-    invariant(version <= FT_LAYOUT_VERSION);
+    paranoid_invariant(version >= FT_LAYOUT_MIN_SUPPORTED_VERSION);
+    paranoid_invariant(version <= FT_LAYOUT_VERSION);
     // We already know:
     //  we have an rbuf representing the header.
     //  The checksum has been validated
@@ -290,6 +290,12 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
         }
     }
+    MSN max_msn_in_ft;
+    max_msn_in_ft = ZERO_MSN;  // We'll upgrade it from the root node later if necessary
+    if (ft->layout_version_read_from_disk >= FT_LAYOUT_VERSION_21) {
+        max_msn_in_ft = rbuf_msn(rb);
+    }
     (void) rbuf_int(rb); //Read in checksum and ignore (already verified).
     if (rb->ndone != rb->size) {
         fprintf(stderr, "Header size did not match contents.\n");
@@ -317,6 +323,7 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
         .basementnodesize = basementnodesize,
         .compression_method = compression_method,
         .highest_unused_msn_for_upgrade = highest_unused_msn_for_upgrade,
+        .max_msn_in_ft = max_msn_in_ft,
         .time_of_last_optimize_begin = time_of_last_optimize_begin,
         .time_of_last_optimize_end = time_of_last_optimize_end,
         .count_of_optimize_in_progress = count_of_optimize_in_progress,
@@ -335,6 +342,12 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
             goto exit;
         }
     }
+    if (ft->layout_version_read_from_disk < FT_LAYOUT_VERSION_21) {
+        r = toku_upgrade_msn_from_root_to_header(fd, ft);
+        if (r != 0) {
+            goto exit;
+        }
+    }
     invariant((uint32_t) ft->layout_version_read_from_disk == version);
     r = deserialize_descriptor_from(fd, ft->blocktable, &ft->descriptor, version);
@@ -366,10 +379,12 @@ serialize_ft_min_size (uint32_t version) {
     size_t size = 0;
     switch(version) {
+    case FT_LAYOUT_VERSION_21:
+        size += sizeof(MSN);  // max_msn_in_ft
     case FT_LAYOUT_VERSION_20:
     case FT_LAYOUT_VERSION_19:
         size += 1;  // compression method
-        size += sizeof(uint64_t);  // highest_unused_msn_for_upgrade
+        size += sizeof(MSN);  // highest_unused_msn_for_upgrade
     case FT_LAYOUT_VERSION_18:
         size += sizeof(uint64_t);  // time_of_last_optimize_begin
         size += sizeof(uint64_t);  // time_of_last_optimize_end
@@ -412,7 +427,7 @@ serialize_ft_min_size (uint32_t version) {
         );
         break;
     default:
-        lazy_assert(false);
+        abort();
     }
     lazy_assert(size <= BLOCK_ALLOCATOR_HEADER_RESERVE);
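`serialize_ft_min_size` leans on deliberate switch fall-through: each version's case adds only the fields that version introduced and then falls into the next-older case, so the sum covers everything a given layout serializes, and unknown versions abort. A toy restatement of the idiom, with versions and field sizes invented for illustration:

    #include <stdint.h>
    #include <stdlib.h>

    // Toy sketch of the cumulative fall-through used above: case N adds the
    // fields new in version N, then falls through to the older cases.
    static size_t toy_min_size_for_version(int version) {
        size_t size = 0;
        switch (version) {
        case 21:
            size += sizeof(uint64_t);  // field added in v21 (e.g. an MSN)
            // fall through
        case 20:
        case 19:
            size += 1;                 // field added in v19 (e.g. a method byte)
            // fall through
        case 18:
            size += sizeof(uint32_t);  // fields present since v18 (e.g. checksum)
            break;
        default:
            abort();                   // unknown layout version
        }
        return size;
    }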
@@ -637,7 +652,7 @@ toku_deserialize_ft_from(int fd,
         version = version_1;
     }
-    invariant(rb);
+    paranoid_invariant(rb);
     r = deserialize_ft_versioned(fd, rb, ft, version);
 exit:
@@ -694,6 +709,7 @@ void toku_serialize_ft_to_wbuf (
     wbuf_MSN(wbuf, h->msn_at_start_of_last_completed_optimize);
     wbuf_char(wbuf, (unsigned char) h->compression_method);
     wbuf_MSN(wbuf, h->highest_unused_msn_for_upgrade);
+    wbuf_MSN(wbuf, h->max_msn_in_ft);
     uint32_t checksum = x1764_finish(&wbuf->checksum);
     wbuf_int(wbuf, checksum);
     lazy_assert(wbuf->ndone == wbuf->size);
...
@@ -136,13 +136,16 @@ int toku_testsetup_insert_to_leaf (FT_HANDLE brt, BLOCKNUM blocknum, const char
         .u = { .id = { toku_fill_dbt(&keydbt, key, keylen),
                        toku_fill_dbt(&valdbt, val, vallen) } } };
+    static size_t zero_flow_deltas[] = { 0, 0 };
     toku_ft_node_put_cmd (
         brt->ft->compare_fun,
         brt->ft->update_fun,
         &brt->ft->cmp_descriptor,
         node,
+        -1,
         &cmd,
         true,
+        zero_flow_deltas,
         NULL
     );
@@ -215,6 +218,8 @@ int toku_testsetup_insert_to_nonleaf (FT_HANDLE brt, BLOCKNUM blocknum, enum ft_
     // using brt APIs.
     node->max_msn_applied_to_node_on_disk = msn;
     node->dirty = 1;
+    // Also hack max_msn_in_ft
+    brt->ft->h->max_msn_in_ft = msn;
     toku_unpin_ftnode(brt->ft, node);
     return 0;
...
@@ -245,7 +245,7 @@ toku_get_node_for_verify(
 static int
 toku_verify_ftnode_internal(FT_HANDLE brt,
-                            MSN rootmsn, MSN parentmsn,
+                            MSN rootmsn, MSN parentmsn, bool messages_exist_above,
                             FTNODE node, int height,
                             const DBT *lesser_pivot,     // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
                             const DBT *greatereq_pivot,  // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
@@ -258,16 +258,11 @@ toku_verify_ftnode_internal(FT_HANDLE brt,
     //printf("%s:%d pin %p\n", __FILE__, __LINE__, node_v);
     toku_assert_entire_node_in_memory(node);
     this_msn = node->max_msn_applied_to_node_on_disk;
-    if (rootmsn.msn == ZERO_MSN.msn) {
-        assert(parentmsn.msn == ZERO_MSN.msn);
-        rootmsn = this_msn;
-        parentmsn = this_msn;
-    }
     if (height >= 0) {
         invariant(height == node->height);  // this is a bad failure if wrong
     }
-    if (node->height > 0) {
+    if (node->height > 0 && messages_exist_above) {
         VERIFY_ASSERTION((parentmsn.msn >= this_msn.msn), 0, "node msn must be descending down tree, newest messages at top");
     }
     // Verify that all the pivot keys are in order.
@@ -390,7 +385,7 @@ done:
 // input is a pinned node, on exit, node is unpinned
 int
 toku_verify_ftnode (FT_HANDLE brt,
-                    MSN rootmsn, MSN parentmsn,
+                    MSN rootmsn, MSN parentmsn, bool messages_exist_above,
                     FTNODE node, int height,
                     const DBT *lesser_pivot,     // Everything in the subtree should be > lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
                     const DBT *greatereq_pivot,  // Everything in the subtree should be <= lesser_pivot. (lesser_pivot==NULL if there is no lesser pivot.)
@@ -402,11 +397,6 @@ toku_verify_ftnode (FT_HANDLE brt,
     //printf("%s:%d pin %p\n", __FILE__, __LINE__, node_v);
     toku_assert_entire_node_in_memory(node);
     this_msn = node->max_msn_applied_to_node_on_disk;
-    if (rootmsn.msn == ZERO_MSN.msn) {
-        assert(parentmsn.msn == ZERO_MSN.msn);
-        rootmsn = this_msn;
-        parentmsn = this_msn;
-    }
     int result = 0;
     int result2 = 0;
@@ -414,7 +404,7 @@ toku_verify_ftnode (FT_HANDLE brt,
         // Otherwise we'll just do the next call
         result = toku_verify_ftnode_internal(
-            brt, rootmsn, parentmsn, node, height, lesser_pivot, greatereq_pivot,
+            brt, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot,
             verbose, keep_going_on_failure, false);
         if (result != 0 && (!keep_going_on_failure || result != TOKUDB_NEEDS_REPAIR)) goto done;
     }
@@ -422,7 +412,7 @@ toku_verify_ftnode (FT_HANDLE brt,
         toku_move_ftnode_messages_to_stale(brt->ft, node);
     }
     result2 = toku_verify_ftnode_internal(
-        brt, rootmsn, parentmsn, node, height, lesser_pivot, greatereq_pivot,
+        brt, rootmsn, parentmsn, messages_exist_above, node, height, lesser_pivot, greatereq_pivot,
         verbose, keep_going_on_failure, true);
     if (result == 0) {
         result = result2;
@@ -434,7 +424,7 @@ toku_verify_ftnode (FT_HANDLE brt,
     for (int i = 0; i < node->n_children; i++) {
         FTNODE child_node;
         toku_get_node_for_verify(BP_BLOCKNUM(node, i), brt, &child_node);
-        int r = toku_verify_ftnode(brt, rootmsn, this_msn,
+        int r = toku_verify_ftnode(brt, rootmsn, this_msn, messages_exist_above || toku_bnc_n_entries(BNC(node, i)) > 0,
                                    child_node, node->height-1,
                                    (i==0) ? lesser_pivot : &node->childkeys[i-1],
                                    (i==node->n_children-1) ? greatereq_pivot : &node->childkeys[i],
@@ -465,7 +455,7 @@ toku_verify_ft_with_progress (FT_HANDLE brt, int (*progress_callback)(void *extr
         toku_calculate_root_offset_pointer(brt->ft, &root_key, &root_hash);
         toku_get_node_for_verify(root_key, brt, &root_node);
     }
-    int r = toku_verify_ftnode(brt, ZERO_MSN, ZERO_MSN, root_node, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going);
+    int r = toku_verify_ftnode(brt, brt->ft->h->max_msn_in_ft, brt->ft->h->max_msn_in_ft, false, root_node, -1, NULL, NULL, progress_callback, progress_extra, 1, verbose, keep_on_going);
     if (r == 0) {
         toku_ft_lock(brt->ft);
         brt->ft->h->time_of_last_verification = time(NULL);
@@ -479,4 +469,3 @@ int
 toku_verify_ft (FT_HANDLE brt) {
     return toku_verify_ft_with_progress(brt, NULL, NULL, 0, 0);
 }
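The verifier changes above replace the old "seed rootmsn from the first node" trick with the header's `max_msn_in_ft`, and they thread a `messages_exist_above` flag down the recursion so the "MSN descends down the tree" check only fires where an ancestor buffer could actually have delivered a newer message. A hedged sketch of just that invariant, with `pin_child` standing in for the real `toku_get_node_for_verify` plumbing:

    // Sketch: a child's MSN may only lag when no ancestor buffer still holds
    // messages for it; otherwise the parent's MSN must bound the child's.
    static void check_msn_invariant(FTNODE node, MSN parent_msn, bool messages_above) {
        MSN this_msn = node->max_msn_applied_to_node_on_disk;
        if (node->height > 0 && messages_above) {
            assert(parent_msn.msn >= this_msn.msn);  // newest messages at the top
        }
        if (node->height == 0) {
            return;  // leaves have no children to descend into
        }
        for (int i = 0; i < node->n_children; i++) {
            bool child_messages_above =
                messages_above || toku_bnc_n_entries(BNC(node, i)) > 0;
            FTNODE child = pin_child(node, i);  // hypothetical helper
            check_msn_invariant(child, this_msn, child_messages_above);
        }
    }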
@@ -13,6 +13,7 @@
 #include <memory.h>
 #include <toku_assert.h>
+#include <portability/toku_atomic.h>
 void
 toku_ft_suppress_rollbacks(FT h, TOKUTXN txn) {
@@ -365,6 +366,7 @@ ft_header_create(FT_OPTIONS options, BLOCKNUM root_blocknum, TXNID root_xid_that
         .basementnodesize = options->basementnodesize,
         .compression_method = options->compression_method,
         .highest_unused_msn_for_upgrade = { .msn = (MIN_MSN.msn - 1) },
+        .max_msn_in_ft = ZERO_MSN,
         .time_of_last_optimize_begin = 0,
         .time_of_last_optimize_end = 0,
         .count_of_optimize_in_progress = 0,
@@ -850,14 +852,14 @@ toku_ft_get_cmp_descriptor(FT_HANDLE ft_handle) {
 void
 toku_ft_update_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
-    (void) __sync_fetch_and_add(&(headerstats->numrows), delta.numrows);
-    (void) __sync_fetch_and_add(&(headerstats->numbytes), delta.numbytes);
+    (void) toku_sync_fetch_and_add(&(headerstats->numrows), delta.numrows);
+    (void) toku_sync_fetch_and_add(&(headerstats->numbytes), delta.numbytes);
 }
 void
 toku_ft_decrease_stats(STAT64INFO headerstats, STAT64INFO_S delta) {
-    (void) __sync_fetch_and_sub(&(headerstats->numrows), delta.numrows);
-    (void) __sync_fetch_and_sub(&(headerstats->numbytes), delta.numbytes);
+    (void) toku_sync_fetch_and_sub(&(headerstats->numrows), delta.numrows);
+    (void) toku_sync_fetch_and_sub(&(headerstats->numbytes), delta.numbytes);
 }
 void
...
@@ -28,6 +28,7 @@ enum ft_layout_version_e {
     FT_LAYOUT_VERSION_20 = 20,  // Deadshot: Add compression method to log_fcreate,
                                 //           mgr_last_xid after begin checkpoint,
                                 //           last_xid to shutdown
+    FT_LAYOUT_VERSION_21 = 21,  // Ming: Add max_msn_in_ft to header
     FT_NEXT_VERSION,  // the version after the current version
     FT_LAYOUT_VERSION = FT_NEXT_VERSION-1,  // A hack so I don't have to change this line.
     FT_LAYOUT_MIN_SUPPORTED_VERSION = FT_LAYOUT_VERSION_13,  // Minimum version supported
...
This diff is collapsed.
@@ -281,6 +281,10 @@ enum reactivity {
     RE_FISSIBLE
 };
+enum split_mode {
+    SPLIT_EVENLY,
+    SPLIT_LEFT_HEAVY,
+    SPLIT_RIGHT_HEAVY
+};
 #endif
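`enum split_mode` lets node-split callers bias where the boundary lands instead of always halving, which matters for sequential load patterns where an even split wastes half of every node. A hedged sketch of how the three modes could translate into a split point over `n` entries (the real `ftleaf_split` also weighs entry sizes, so this is illustration only):

    // Sketch: number of entries kept in the left node under each mode.
    static int toy_num_left_entries(enum split_mode mode, int n) {
        switch (mode) {
        case SPLIT_EVENLY:
            return n / 2;  // classic balanced split
        case SPLIT_LEFT_HEAVY:
            return n - 1;  // ascending inserts: keep the left node full
        case SPLIT_RIGHT_HEAVY:
            return 1;      // descending inserts: keep the right node full
        }
        return n / 2;      // not reached
    }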
@@ -179,7 +179,7 @@ int
 toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,  // NULL if there was no stored data.
                       size_t *new_leafentry_memorysize,
                       LEAFENTRY *new_leafentry_p,
-                      OMT omt,
+                      OMT *omtp,
                       struct mempool *mp);
...
@@ -34,6 +34,7 @@ lfence: 12.9ns/loop (marginal cost= -0.1ns)
 #include <sys/time.h>
 #include <stdio.h>
+#include <portability/toku_atomic.h>
 enum { COUNT = 100000000 };
@@ -67,8 +68,8 @@ static inline void sfence (void) {
 int lock_for_lock_and_unlock;
 static inline void lock_and_unlock (void) {
-    (void)__sync_lock_test_and_set(&lock_for_lock_and_unlock, 1);
-    __sync_lock_release(&lock_for_lock_and_unlock);
+    (void)toku_sync_lock_test_and_set(&lock_for_lock_and_unlock, 1);
+    toku_sync_lock_release(&lock_for_lock_and_unlock);
 }
...
@@ -13,6 +13,7 @@
 #include <stdio.h>
 #include <sys/time.h>
 #include <pthread.h>
+#include <portability/toku_atomic.h>
 float tdiff (struct timeval *start, struct timeval *end) {
     return 1e6*(end->tv_sec-start->tv_sec) +(end->tv_usec - start->tv_usec);
@@ -71,13 +72,13 @@ fetch_and_add_i (volatile int *p, int incr)
 static inline int
 gcc_fetch_and_add_i (volatile int *p, int incr)
 {
-    return __sync_fetch_and_add(p, incr);
+    return toku_sync_fetch_and_add(p, incr);
 }
 static inline long
 gcc_fetch_and_add_l (volatile long *p, long incr)
 {
-    return __sync_fetch_and_add(p, incr);
+    return toku_sync_fetch_and_add(p, incr);
 }
 // Something wrong with the compiler for longs
...
@@ -13,6 +13,7 @@
 #include <sys/time.h>
 #include <unistd.h>
 #include <rdtsc.h>
+#include <portability/toku_atomic.h>
 float tdiff (struct timeval *start, struct timeval *end) {
     return 1e6*(end->tv_sec-start->tv_sec) +(end->tv_usec - start->tv_usec);
@@ -135,12 +136,12 @@ int main(int argc __attribute__((unused)), char **argv)
     {
         static int lock_for_lock_and_unlock;
         t_start = rdtsc();
-        (void)__sync_lock_test_and_set(&lock_for_lock_and_unlock, 1);
+        (void)toku_sync_lock_test_and_set(&lock_for_lock_and_unlock, 1);
         t_end = rdtsc();
         printf("sync_lock_test_and_set took %llu clocks\n", t_end-t_start);
         t_start = rdtsc();
-        __sync_lock_release(&lock_for_lock_and_unlock);
+        toku_sync_lock_release(&lock_for_lock_and_unlock);
         t_end = rdtsc();
         printf("sync_lock_release took %llu clocks\n", t_end-t_start);
     }
@@ -148,7 +149,7 @@ int main(int argc __attribute__((unused)), char **argv)
     {
         t_start = rdtsc();
-        (void)__sync_synchronize();
+        (void)toku_sync_synchronize();
         t_end = rdtsc();
         printf("sync_synchornize took %llu clocks\n", t_end-t_start);
     }
...
@@ -44,6 +44,7 @@ static inline CACHETABLE_WRITE_CALLBACK get_write_callbacks_for_rollback_log(FT
     wc.pe_callback = toku_rollback_pe_callback;
     wc.cleaner_callback = toku_rollback_cleaner_callback;
     wc.clone_callback = toku_rollback_clone_callback;
+    wc.checkpoint_complete_callback = nullptr;
     wc.write_extraargs = h;
     return wc;
 }
...
@@ -84,7 +84,7 @@ void rollback_empty_log_init(ROLLBACK_LOG_NODE log) {
     log->layout_version_read_from_disk = FT_LAYOUT_VERSION;
     log->dirty = true;
     log->sequence = 0;
-    log->previous = {0};
+    log->previous = make_blocknum(0);
     log->previous_hash = 0;
     log->oldest_logentry = NULL;
     log->newest_logentry = NULL;
...
@@ -9,6 +9,7 @@
 #include <unistd.h>
 #include "cachetable-test.h"
 #include "checkpoint.h"
+#include <portability/toku_atomic.h>
 static int N; // how many items in the table
 static CACHEFILE cf;
@@ -54,9 +55,9 @@ flush (
     int *CAST_FROM_VOIDP(v, value);
     if (*v!=expect_value) printf("got %d expect %d\n", *v, expect_value);
     assert(*v==expect_value);
-    (void)__sync_fetch_and_add(&n_flush, 1);
-    if (write_me) (void)__sync_fetch_and_add(&n_write_me, 1);
-    if (keep_me) (void)__sync_fetch_and_add(&n_keep_me, 1);
+    (void)toku_sync_fetch_and_add(&n_flush, 1);
+    if (write_me) (void)toku_sync_fetch_and_add(&n_write_me, 1);
+    if (keep_me) (void)toku_sync_fetch_and_add(&n_keep_me, 1);
     sleep_random();
 }
...
@@ -106,7 +106,7 @@ static void cachetable_checkpoint_test(int n, enum cachetable_dirty dirty) {
         CACHEKEY key = make_blocknum(i);
         uint32_t hi = toku_cachetable_hash(f1, key);
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, key, hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, key, hi, PL_WRITE_EXPENSIVE, &v);
         if (r != 0)
             continue;
         r = toku_test_cachetable_unpin(f1, key, hi, CACHETABLE_CLEAN, make_pair_attr(item_size));
...
@@ -193,6 +193,7 @@ void checkpointer_test::add_pairs(struct cachefile *cf,
     attr.cache_pressure_size = 0;
     attr.is_valid = true;
     CACHETABLE_WRITE_CALLBACK cb;
+    ZERO_STRUCT(cb);  // All nullptr
     for (uint32_t i = k; i < count + k; ++i) {
         CACHEKEY key;
@@ -201,12 +202,12 @@ void checkpointer_test::add_pairs(struct cachefile *cf,
         pair_init(&(pairs[i]),
                   cf,
                   key,
-                  NULL,
+                  nullptr,
                   attr,
                   CACHETABLE_CLEAN,
                   full_hash,
                   cb,
-                  NULL,
+                  nullptr,
                   m_cp.m_list);
         m_cp.m_list->put(&pairs[i]);
...
@@ -27,7 +27,8 @@ cachetable_count_pinned_test (int n) {
         assert(toku_cachefile_count_pinned(f1, 0) == i);
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r == -1);
         assert(toku_cachefile_count_pinned(f1, 0) == i);
...
@@ -43,12 +43,12 @@ test_cachetable_def_flush (int n) {
         uint32_t hi;
         void *v;
         hi = toku_cachetable_hash(f1, make_blocknum(i));
-        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r == 0 && v == (void *)(long)i);
         r = toku_test_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
         assert(r == 0);
         hi = toku_cachetable_hash(f2, make_blocknum(i));
-        r = toku_cachetable_maybe_get_and_pin(f2, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f2, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r == 0 && v == (void *)(long)i);
         r = toku_test_cachetable_unpin(f2, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
         assert(r == 0);
@@ -63,10 +63,10 @@ test_cachetable_def_flush (int n) {
         uint32_t hi;
         void *v;
         hi = toku_cachetable_hash(f1, make_blocknum(i));
-        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r != 0);
         hi = toku_cachetable_hash(f2, make_blocknum(i));
-        r = toku_cachetable_maybe_get_and_pin(f2, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f2, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r == 0);
         r = toku_test_cachetable_unpin(f2, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
         assert(r == 0);
...
@@ -122,7 +122,7 @@ static void cachetable_prefetch_checkpoint_test(int n, enum cachetable_dirty dir
         CACHEKEY key = make_blocknum(i);
         uint32_t hi = toku_cachetable_hash(f1, key);
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, key, hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, key, hi, PL_WRITE_EXPENSIVE, &v);
         if (r != 0)
             continue;
         r = toku_test_cachetable_unpin(f1, key, hi, CACHETABLE_CLEAN, make_pair_attr(item_size));
...
@@ -51,7 +51,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
     int i;
     for (i=1; i>=0; i++) {
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, key, fullhash, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, key, fullhash, PL_WRITE_EXPENSIVE, &v);
         if (r == 0) break;
         toku_pthread_yield();
     }
...
@@ -58,7 +58,7 @@ static void cachetable_prefetch_maybegetandpin_test (void) {
     int i;
     for (i=1; i>=0; i++) {
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, key, fullhash, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, key, fullhash, PL_WRITE_EXPENSIVE, &v);
         if (r == 0) break;
         toku_pthread_yield();
     }
...
@@ -26,7 +26,7 @@ cachetable_put_test (int n) {
         assert(toku_cachefile_count_pinned(f1, 0) == i);
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r == -1);
         assert(toku_cachefile_count_pinned(f1, 0) == i);
...
@@ -26,37 +26,37 @@ cachetable_test (void) {
     void* v1;
     long s1;
     // nothing in cachetable, so this should fail
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==-1);
     r = toku_cachetable_get_and_pin(f1, make_blocknum(1), 1, &v1, &s1, wc, def_fetch, def_pf_req_callback, def_pf_callback, true, NULL);
     r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_CLEAN, make_pair_attr(8));
     // maybe_get_and_pin_clean should succeed, maybe_get_and_pin should fail
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==-1);
-    r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r == 0);
     r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
     // maybe_get_and_pin_clean should succeed, maybe_get_and_pin should fail
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==0);
     // now these calls should fail because the node is already pinned, and therefore in use
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==-1);
-    r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin_clean(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==-1);
     r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
     // sanity check, this should still succeed, because the PAIR is dirty
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==0);
     r = toku_test_cachetable_unpin(f1, make_blocknum(1), 1, CACHETABLE_DIRTY, make_pair_attr(8));
     CHECKPOINTER cp = toku_cachetable_get_checkpointer(ct);
     toku_cachetable_begin_checkpoint(cp, NULL);
     // now these should fail, because the node should be pending a checkpoint
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==-1);
-    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, &v1);
+    r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(1), 1, PL_WRITE_EXPENSIVE, &v1);
     assert(r==-1);
     toku_cachetable_end_checkpoint(
         cp,
...
@@ -105,7 +105,7 @@ static void test_nested_pin (void) {
     assert(i0==0);
     r = toku_test_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
     assert(r==0);
-    r = toku_cachetable_maybe_get_and_pin(f, make_blocknum(1), f1hash, &vv2);
+    r = toku_cachetable_maybe_get_and_pin(f, make_blocknum(1), f1hash, PL_WRITE_EXPENSIVE, &vv2);
     assert(r==0);
     assert(vv2==vv);
     r = toku_test_cachetable_unpin(f, make_blocknum(1), f1hash, CACHETABLE_CLEAN, make_pair_attr(test_object_size));
...
@@ -63,7 +63,7 @@ cachetable_unpin_and_remove_test (int n) {
         // verify that k is removed
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(testkeys[i].b), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(testkeys[i].b), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r != 0);
         testkeys[i] = testkeys[nkeys-1]; nkeys -= 1;
...
@@ -27,7 +27,7 @@ cachetable_unpin_test (int n) {
         assert(toku_cachefile_count_pinned(f1, 0) == i);
         void *v;
-        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, &v);
+        r = toku_cachetable_maybe_get_and_pin(f1, make_blocknum(i), hi, PL_WRITE_EXPENSIVE, &v);
         assert(r == -1);
         assert(toku_cachefile_count_pinned(f1, 0) == i);
...
@@ -4,6 +4,7 @@
 #ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
 #include "includes.h"
 #include "test.h"
+#include <portability/toku_atomic.h>
 static int total_size;
@@ -25,7 +26,7 @@ flush (CACHEFILE f __attribute__((__unused__)),
        bool UU(is_clone)
        ) {
     if (w) {
-        int curr_size = __sync_fetch_and_sub(&total_size, 1);
+        int curr_size = toku_sync_fetch_and_sub(&total_size, 1);
         assert(curr_size <= 200);
         usleep(500*1000);
     }
@@ -49,7 +50,7 @@ cachetable_test (void) {
         CACHETABLE_WRITE_CALLBACK wc = def_write_callback(NULL);
         wc.flush_callback = flush;
         toku_cachetable_put(f1, make_blocknum(i), i, NULL, make_pair_attr(1), wc, put_callback_nop);
-        int curr_size = __sync_fetch_and_add(&total_size, 1);
+        int curr_size = toku_sync_fetch_and_add(&total_size, 1);
         assert(curr_size <= test_limit + test_limit/2+1);
         r = toku_test_cachetable_unpin(f1, make_blocknum(i), i, CACHETABLE_DIRTY, make_pair_attr(4));
     }
...
@@ -7,6 +7,7 @@
 #ifndef FTLOADER_ERROR_INJECTOR_H
 #define FTLOADER_ERROR_INJECTOR_H
+#include <portability/toku_atomic.h>
 static toku_mutex_t event_mutex = TOKU_MUTEX_INITIALIZER;
 static void lock_events(void) {
@@ -107,9 +108,9 @@ static void reset_my_malloc_counts(void) {
 __attribute__((__unused__))
 static void *my_malloc(size_t n) {
-    (void) __sync_fetch_and_add(&my_malloc_count, 1);  // my_malloc_count++;
+    (void) toku_sync_fetch_and_add(&my_malloc_count, 1);  // my_malloc_count++;
     if (n >= my_big_malloc_limit) {
-        (void) __sync_fetch_and_add(&my_big_malloc_count, 1);  // my_big_malloc_count++;
+        (void) toku_sync_fetch_and_add(&my_big_malloc_count, 1);  // my_big_malloc_count++;
         if (do_malloc_errors) {
             if (event_add_and_fetch() == event_count_trigger) {
                 event_hit();
@@ -125,9 +126,9 @@ static int do_realloc_errors = 0;
 __attribute__((__unused__))
 static void *my_realloc(void *p, size_t n) {
-    (void) __sync_fetch_and_add(&my_realloc_count, 1);  // my_realloc_count++;
+    (void) toku_sync_fetch_and_add(&my_realloc_count, 1);  // my_realloc_count++;
     if (n >= my_big_malloc_limit) {
-        (void) __sync_fetch_and_add(&my_big_realloc_count, 1);  // my_big_realloc_count++;
+        (void) toku_sync_fetch_and_add(&my_big_realloc_count, 1);  // my_big_realloc_count++;
         if (do_realloc_errors) {
             if (event_add_and_fetch() == event_count_trigger) {
                 event_hit();
...
...@@ -82,8 +82,8 @@ static void test_keyrange (enum memory_state ms, uint64_t limit) { ...@@ -82,8 +82,8 @@ static void test_keyrange (enum memory_state ms, uint64_t limit) {
struct ftstat64_s s; struct ftstat64_s s;
toku_ft_handle_stat64(t, null_txn, &s); toku_ft_handle_stat64(t, null_txn, &s);
assert(0 < s.nkeys && s.nkeys < limit); assert(0 < s.nkeys && s.nkeys <= limit);
assert(0 < s.dsize && s.dsize < limit * (9 + 9)); // keylen = 9, vallen = 9 assert(0 < s.dsize && s.dsize <= limit * (9 + 9)); // keylen = 9, vallen = 9
} }
maybe_reopen(ms, limit); maybe_reopen(ms, limit);
......
...@@ -138,7 +138,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) { ...@@ -138,7 +138,7 @@ test_make_tree(int height, int fanout, int nperleaf, int do_verify) {
// set the new root to point to the new tree // set the new root to point to the new tree
toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename); toku_ft_set_new_root_blocknum(brt->ft, newroot->thisnodename);
newroot->max_msn_applied_to_node_on_disk = last_dummymsn(); // capture msn of last message injected into tree brt->ft->h->max_msn_in_ft = last_dummymsn(); // capture msn of last message injected into tree
// unpin the new root // unpin the new root
toku_unpin_ftnode(brt->ft, newroot); toku_unpin_ftnode(brt->ft, newroot);
......
...@@ -45,10 +45,10 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va ...@@ -45,10 +45,10 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
// apply an insert to the leaf node // apply an insert to the leaf node
MSN msn = next_dummymsn(); MSN msn = next_dummymsn();
brt->ft->h->max_msn_in_ft = msn;
FT_MSG_S cmd = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} }; FT_MSG_S cmd = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &theval }} };
uint64_t workdone=0; toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &cmd, nullptr, nullptr);
toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, &cmd, &workdone, NULL);
{ {
int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair); int r = toku_ft_lookup(brt, &thekey, lookup_checkf, &pair);
assert(r==0); assert(r==0);
...@@ -56,8 +56,7 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va ...@@ -56,8 +56,7 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
} }
FT_MSG_S badcmd = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} }; FT_MSG_S badcmd = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval }} };
toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, &badcmd, &workdone, NULL); toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &badcmd, nullptr, nullptr);
// message should be rejected for duplicate msn, row should still have original val // message should be rejected for duplicate msn, row should still have original val
{ {
...@@ -68,8 +67,9 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va ...@@ -68,8 +67,9 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
// now verify that message with proper msn gets through // now verify that message with proper msn gets through
msn = next_dummymsn(); msn = next_dummymsn();
brt->ft->h->max_msn_in_ft = msn;
FT_MSG_S cmd2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} }; FT_MSG_S cmd2 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &val2 }} };
toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, &cmd2, &workdone, NULL); toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &cmd2, nullptr, nullptr);
// message should be accepted, val should have new value // message should be accepted, val should have new value
{ {
...@@ -81,7 +81,7 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va ...@@ -81,7 +81,7 @@ append_leaf(FT_HANDLE brt, FTNODE leafnode, void *key, uint32_t keylen, void *va
// now verify that message with lesser (older) msn is rejected // now verify that message with lesser (older) msn is rejected
msn.msn = msn.msn - 10; msn.msn = msn.msn - 10;
FT_MSG_S cmd3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }}; FT_MSG_S cmd3 = { FT_INSERT, msn, xids_get_root_xids(), .u={.id = { &thekey, &badval } }};
toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, &cmd3, &workdone, NULL); toku_ft_leaf_apply_cmd(brt->ft->compare_fun, brt->ft->update_fun, &brt->ft->cmp_descriptor, leafnode, -1, &cmd3, nullptr, nullptr);
// message should be rejected, val should still have value in pair2 // message should be rejected, val should still have value in pair2
{ {
......
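The three cases above exercise a simple idempotency rule: a leaf applies a message only if its MSN is strictly newer than the MSN most recently applied to that node, so duplicate and stale messages are dropped. A minimal sketch of that rule (should_apply_msg is an illustrative name, not the source's exact code):

#include <stdint.h>
// Illustrative sketch of the MSN filter the test exercises: apply a message
// iff its MSN is strictly greater than the node's last applied MSN.
static bool should_apply_msg(uint64_t msg_msn, uint64_t max_msn_applied) {
    return msg_msn > max_msn_applied;
}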
...@@ -580,7 +580,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) { ...@@ -580,7 +580,7 @@ flush_to_leaf(FT_HANDLE t, bool make_leaf_up_to_date, bool use_flush) {
if (make_leaf_up_to_date) { if (make_leaf_up_to_date) {
for (i = 0; i < num_parent_messages; ++i) { for (i = 0; i < num_parent_messages; ++i) {
if (!parent_messages_is_fresh[i]) { if (!parent_messages_is_fresh[i]) {
toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child, parent_messages[i], NULL, NULL); toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child, -1, parent_messages[i], NULL, NULL);
} }
} }
for (i = 0; i < 8; ++i) { for (i = 0; i < 8; ++i) {
...@@ -803,7 +803,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) { ...@@ -803,7 +803,7 @@ flush_to_leaf_with_keyrange(FT_HANDLE t, bool make_leaf_up_to_date) {
for (i = 0; i < num_parent_messages; ++i) { for (i = 0; i < num_parent_messages; ++i) {
if (dummy_cmp(NULL, parent_messages[i]->u.id.key, &childkeys[7]) <= 0 && if (dummy_cmp(NULL, parent_messages[i]->u.id.key, &childkeys[7]) <= 0 &&
!parent_messages_is_fresh[i]) { !parent_messages_is_fresh[i]) {
toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child, parent_messages[i], NULL, NULL); toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child, -1, parent_messages[i], NULL, NULL);
} }
} }
for (i = 0; i < 8; ++i) { for (i = 0; i < 8; ++i) {
...@@ -995,8 +995,8 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) { ...@@ -995,8 +995,8 @@ compare_apply_and_flush(FT_HANDLE t, bool make_leaf_up_to_date) {
if (make_leaf_up_to_date) { if (make_leaf_up_to_date) {
for (i = 0; i < num_parent_messages; ++i) { for (i = 0; i < num_parent_messages; ++i) {
if (!parent_messages_is_fresh[i]) { if (!parent_messages_is_fresh[i]) {
toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child1, parent_messages[i], NULL, NULL); toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child1, -1, parent_messages[i], NULL, NULL);
toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child2, parent_messages[i], NULL, NULL); toku_ft_leaf_apply_cmd(t->ft->compare_fun, t->ft->update_fun, &t->ft->descriptor, child2, -1, parent_messages[i], NULL, NULL);
} }
} }
for (i = 0; i < 8; ++i) { for (i = 0; i < 8; ++i) {
......
...@@ -228,7 +228,8 @@ static UU() CACHETABLE_WRITE_CALLBACK def_write_callback(void* write_extraargs) ...@@ -228,7 +228,8 @@ static UU() CACHETABLE_WRITE_CALLBACK def_write_callback(void* write_extraargs)
wc.pe_callback = def_pe_callback; wc.pe_callback = def_pe_callback;
wc.cleaner_callback = def_cleaner_callback; wc.cleaner_callback = def_cleaner_callback;
wc.write_extraargs = write_extraargs; wc.write_extraargs = write_extraargs;
wc.clone_callback = NULL; wc.clone_callback = nullptr;
wc.checkpoint_complete_callback = nullptr;
return wc; return wc;
} }
......
...@@ -171,7 +171,7 @@ test_split_on_boundary(void) ...@@ -171,7 +171,7 @@ test_split_on_boundary(void)
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL); ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884); verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884); verify_basement_node_msns(nodeb, dummy_msn_3884);
...@@ -244,7 +244,7 @@ test_split_with_everything_on_the_left(void) ...@@ -244,7 +244,7 @@ test_split_with_everything_on_the_left(void)
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL); ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
...@@ -319,7 +319,7 @@ test_split_on_boundary_of_last_node(void) ...@@ -319,7 +319,7 @@ test_split_on_boundary_of_last_node(void)
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL); ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
...@@ -387,7 +387,7 @@ test_split_at_begin(void) ...@@ -387,7 +387,7 @@ test_split_at_begin(void)
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL); ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
...@@ -451,7 +451,7 @@ test_split_at_end(void) ...@@ -451,7 +451,7 @@ test_split_at_end(void)
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL); ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
toku_unpin_ftnode(brt->ft, nodeb); toku_unpin_ftnode(brt->ft, nodeb);
r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0); r = toku_close_ft_handle_nolsn(brt, NULL); assert(r == 0);
...@@ -505,7 +505,7 @@ test_split_odd_nodes(void) ...@@ -505,7 +505,7 @@ test_split_odd_nodes(void)
FTNODE nodea, nodeb; FTNODE nodea, nodeb;
DBT splitk; DBT splitk;
// if we haven't done it right, we should hit the assert in the top of move_leafentries // if we haven't done it right, we should hit the assert in the top of move_leafentries
ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, 0, NULL); ftleaf_split(brt->ft, &sn, &nodea, &nodeb, &splitk, true, SPLIT_EVENLY, 0, NULL);
verify_basement_node_msns(nodea, dummy_msn_3884); verify_basement_node_msns(nodea, dummy_msn_3884);
verify_basement_node_msns(nodeb, dummy_msn_3884); verify_basement_node_msns(nodeb, dummy_msn_3884);
......
...@@ -62,7 +62,7 @@ void le_unpack(ULE ule, LEAFENTRY le); ...@@ -62,7 +62,7 @@ void le_unpack(ULE ule, LEAFENTRY le);
int le_pack(ULE ule, // data to be packed into new leafentry int le_pack(ULE ule, // data to be packed into new leafentry
size_t *new_leafentry_memorysize, size_t *new_leafentry_memorysize,
LEAFENTRY * const new_leafentry_p, // this is what this function creates LEAFENTRY * const new_leafentry_p, // this is what this function creates
OMT omt, OMT *omtp,
struct mempool *mp, struct mempool *mp,
void **maybe_free); void **maybe_free);
......
...@@ -154,11 +154,11 @@ static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p); ...@@ -154,11 +154,11 @@ static inline size_t uxr_unpack_length_and_bit(UXR uxr, uint8_t *p);
static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p); static inline size_t uxr_unpack_data(UXR uxr, uint8_t *p);
static void * static void *
le_malloc(OMT omt, struct mempool *mp, size_t size, void **maybe_free) le_malloc(OMT *omtp, struct mempool *mp, size_t size, void **maybe_free)
{ {
void * rval; void * rval;
if (omt) if (omtp)
rval = mempool_malloc_from_omt(omt, mp, size, maybe_free); rval = mempool_malloc_from_omt(omtp, mp, size, maybe_free);
else else
rval = toku_xmalloc(size); rval = toku_xmalloc(size);
resource_assert(rval); resource_assert(rval);
...@@ -322,7 +322,7 @@ apply_msg_to_leafentry(FT_MSG msg, // message to apply to leafentry ...@@ -322,7 +322,7 @@ apply_msg_to_leafentry(FT_MSG msg, // message to apply to leafentry
LEAFENTRY old_leafentry, // NULL if there was no stored data. LEAFENTRY old_leafentry, // NULL if there was no stored data.
size_t *new_leafentry_memorysize, size_t *new_leafentry_memorysize,
LEAFENTRY *new_leafentry_p, LEAFENTRY *new_leafentry_p,
OMT omt, OMT *omtp,
struct mempool *mp, struct mempool *mp,
void **maybe_free, void **maybe_free,
int64_t * numbytes_delta_p) { // change in total size of key and val, not including any overhead int64_t * numbytes_delta_p) { // change in total size of key and val, not including any overhead
...@@ -341,7 +341,7 @@ apply_msg_to_leafentry(FT_MSG msg, // message to apply to leafentry ...@@ -341,7 +341,7 @@ apply_msg_to_leafentry(FT_MSG msg, // message to apply to leafentry
rval = le_pack(&ule, // create packed leafentry rval = le_pack(&ule, // create packed leafentry
new_leafentry_memorysize, new_leafentry_memorysize,
new_leafentry_p, new_leafentry_p,
omt, omtp,
mp, mp,
maybe_free); maybe_free);
if (new_leafentry_p) if (new_leafentry_p)
...@@ -374,7 +374,7 @@ int ...@@ -374,7 +374,7 @@ int
garbage_collect_leafentry(LEAFENTRY old_leaf_entry, garbage_collect_leafentry(LEAFENTRY old_leaf_entry,
LEAFENTRY *new_leaf_entry, LEAFENTRY *new_leaf_entry,
size_t *new_leaf_entry_memory_size, size_t *new_leaf_entry_memory_size,
OMT omt, OMT *omtp,
struct mempool *mp, struct mempool *mp,
void **maybe_free, void **maybe_free,
const xid_omt_t &snapshot_xids, const xid_omt_t &snapshot_xids,
...@@ -387,7 +387,7 @@ garbage_collect_leafentry(LEAFENTRY old_leaf_entry, ...@@ -387,7 +387,7 @@ garbage_collect_leafentry(LEAFENTRY old_leaf_entry,
r = le_pack(&ule, r = le_pack(&ule,
new_leaf_entry_memory_size, new_leaf_entry_memory_size,
new_leaf_entry, new_leaf_entry,
omt, omtp,
mp, mp,
maybe_free); maybe_free);
assert(r == 0); assert(r == 0);
...@@ -713,7 +713,7 @@ int ...@@ -713,7 +713,7 @@ int
le_pack(ULE ule, // data to be packed into new leafentry le_pack(ULE ule, // data to be packed into new leafentry
size_t *new_leafentry_memorysize, size_t *new_leafentry_memorysize,
LEAFENTRY * const new_leafentry_p, // this is what this function creates LEAFENTRY * const new_leafentry_p, // this is what this function creates
OMT omt, OMT *omtp,
struct mempool *mp, struct mempool *mp,
void **maybe_free) void **maybe_free)
{ {
...@@ -740,7 +740,7 @@ le_pack(ULE ule, // data to be packed into new leafen ...@@ -740,7 +740,7 @@ le_pack(ULE ule, // data to be packed into new leafen
found_insert:; found_insert:;
memsize = le_memsize_from_ule(ule); memsize = le_memsize_from_ule(ule);
LEAFENTRY new_leafentry; LEAFENTRY new_leafentry;
CAST_FROM_VOIDP(new_leafentry, le_malloc(omt, mp, memsize, maybe_free)); CAST_FROM_VOIDP(new_leafentry, le_malloc(omtp, mp, memsize, maybe_free));
//Universal data //Universal data
new_leafentry->keylen = toku_htod32(ule->keylen); new_leafentry->keylen = toku_htod32(ule->keylen);
...@@ -2293,7 +2293,7 @@ int ...@@ -2293,7 +2293,7 @@ int
toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry, toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
size_t *new_leafentry_memorysize, size_t *new_leafentry_memorysize,
LEAFENTRY *new_leafentry_p, LEAFENTRY *new_leafentry_p,
OMT omt, OMT *omtp,
struct mempool *mp) { struct mempool *mp) {
ULE_S ule; ULE_S ule;
int rval; int rval;
...@@ -2305,7 +2305,7 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry, ...@@ -2305,7 +2305,7 @@ toku_le_upgrade_13_14(LEAFENTRY_13 old_leafentry,
rval = le_pack(&ule, // create packed leafentry rval = le_pack(&ule, // create packed leafentry
new_leafentry_memorysize, new_leafentry_memorysize,
new_leafentry_p, new_leafentry_p,
omt, mp, NULL); omtp, mp, NULL);
ule_cleanup(&ule); ule_cleanup(&ule);
return rval; return rval;
} }
......
...@@ -56,7 +56,7 @@ int apply_msg_to_leafentry(FT_MSG msg, ...@@ -56,7 +56,7 @@ int apply_msg_to_leafentry(FT_MSG msg,
LEAFENTRY old_leafentry, // NULL if there was no stored data. LEAFENTRY old_leafentry, // NULL if there was no stored data.
size_t *new_leafentry_memorysize, size_t *new_leafentry_memorysize,
LEAFENTRY *new_leafentry_p, LEAFENTRY *new_leafentry_p,
OMT omt, OMT *omtp,
struct mempool *mp, struct mempool *mp,
void **maybe_free, void **maybe_free,
int64_t * numbytes_delta_p); int64_t * numbytes_delta_p);
...@@ -64,7 +64,7 @@ int apply_msg_to_leafentry(FT_MSG msg, ...@@ -64,7 +64,7 @@ int apply_msg_to_leafentry(FT_MSG msg,
int garbage_collect_leafentry(LEAFENTRY old_leaf_entry, int garbage_collect_leafentry(LEAFENTRY old_leaf_entry,
LEAFENTRY *new_leaf_entry, LEAFENTRY *new_leaf_entry,
size_t *new_leaf_entry_memory_size, size_t *new_leaf_entry_memory_size,
OMT omt, OMT *omtp,
struct mempool *mp, struct mempool *mp,
void **maybe_free, void **maybe_free,
const xid_omt_t &snapshot_xids, const xid_omt_t &snapshot_xids,
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <errno.h> #include <errno.h>
#include "threadpool.h" #include "threadpool.h"
#include <portability/toku_atomic.h>
// use gcc builtin fetch_and_add 0->no 1->yes // use gcc builtin fetch_and_add 0->no 1->yes
#define DO_ATOMIC_FETCH_AND_ADD 0 #define DO_ATOMIC_FETCH_AND_ADD 0
...@@ -61,7 +62,7 @@ void threadpool_maybe_add(THREADPOOL threadpool, void *(*f)(void *), void *arg) ...@@ -61,7 +62,7 @@ void threadpool_maybe_add(THREADPOOL threadpool, void *(*f)(void *), void *arg)
void threadpool_set_thread_busy(THREADPOOL threadpool) { void threadpool_set_thread_busy(THREADPOOL threadpool) {
#if DO_ATOMIC_FETCH_AND_ADD #if DO_ATOMIC_FETCH_AND_ADD
(void) __sync_fetch_and_add(&threadpool->busy_threads, 1); (void) toku_sync_fetch_and_add(&threadpool->busy_threads, 1);
#else #else
threadpool->busy_threads++; threadpool->busy_threads++;
#endif #endif
...@@ -69,7 +70,7 @@ void threadpool_set_thread_busy(THREADPOOL threadpool) { ...@@ -69,7 +70,7 @@ void threadpool_set_thread_busy(THREADPOOL threadpool) {
void threadpool_set_thread_idle(THREADPOOL threadpool) { void threadpool_set_thread_idle(THREADPOOL threadpool) {
#if DO_ATOMIC_FETCH_AND_ADD #if DO_ATOMIC_FETCH_AND_ADD
(void) __sync_fetch_and_add(&threadpool->busy_threads, -1); (void) toku_sync_fetch_and_add(&threadpool->busy_threads, -1);
#else #else
threadpool->busy_threads--; threadpool->busy_threads--;
#endif #endif
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include "memory.h" #include "memory.h"
#include "toku_time.h" #include "toku_time.h"
#include <portability/toku_atomic.h>
static int toku_assert_on_write_enospc = 0; static int toku_assert_on_write_enospc = 0;
static const int toku_write_enospc_sleep = 1; static const int toku_write_enospc_sleep = 1;
...@@ -60,8 +61,8 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) { ...@@ -60,8 +61,8 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) {
int out_of_disk_space = 1; int out_of_disk_space = 1;
assert(!out_of_disk_space); //Give an error message that might be useful if this is the only one that survives. assert(!out_of_disk_space); //Give an error message that might be useful if this is the only one that survives.
} else { } else {
__sync_fetch_and_add(&toku_write_enospc_total, 1); toku_sync_fetch_and_add(&toku_write_enospc_total, 1);
__sync_fetch_and_add(&toku_write_enospc_current, 1); toku_sync_fetch_and_add(&toku_write_enospc_current, 1);
time_t tnow = time(0); time_t tnow = time(0);
toku_write_enospc_last_time = tnow; toku_write_enospc_last_time = tnow;
...@@ -89,7 +90,7 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) { ...@@ -89,7 +90,7 @@ try_again_after_handling_write_error(int fd, size_t len, ssize_t r_write) {
} }
sleep(toku_write_enospc_sleep); sleep(toku_write_enospc_sleep);
try_again = 1; try_again = 1;
__sync_fetch_and_sub(&toku_write_enospc_current, 1); toku_sync_fetch_and_sub(&toku_write_enospc_current, 1);
break; break;
} }
} }
...@@ -347,9 +348,9 @@ static void file_fsync_internal (int fd, uint64_t *duration_p) { ...@@ -347,9 +348,9 @@ static void file_fsync_internal (int fd, uint64_t *duration_p) {
assert(get_error_errno() == EINTR); assert(get_error_errno() == EINTR);
} }
} }
__sync_fetch_and_add(&toku_fsync_count, 1); toku_sync_fetch_and_add(&toku_fsync_count, 1);
uint64_t duration = toku_current_time_usec() - tstart; uint64_t duration = toku_current_time_usec() - tstart;
__sync_fetch_and_add(&toku_fsync_time, duration); toku_sync_fetch_and_add(&toku_fsync_time, duration);
if (duration_p) { if (duration_p) {
*duration_p = duration; *duration_p = duration;
} }
...@@ -383,8 +384,8 @@ int toku_fsync_dir_by_name_without_accounting(const char *dir_name) { ...@@ -383,8 +384,8 @@ int toku_fsync_dir_by_name_without_accounting(const char *dir_name) {
void toku_file_fsync(int fd) { void toku_file_fsync(int fd) {
uint64_t duration; uint64_t duration;
file_fsync_internal (fd, &duration); file_fsync_internal (fd, &duration);
__sync_fetch_and_add(&sched_fsync_count, 1); toku_sync_fetch_and_add(&sched_fsync_count, 1);
__sync_fetch_and_add(&sched_fsync_time, duration); toku_sync_fetch_and_add(&sched_fsync_time, duration);
} }
// for real accounting // for real accounting
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <toku_race_tools.h> #include <toku_race_tools.h>
#include "memory.h" #include "memory.h"
#include "toku_assert.h" #include "toku_assert.h"
#include <portability/toku_atomic.h>
static malloc_fun_t t_malloc = 0; static malloc_fun_t t_malloc = 0;
static malloc_fun_t t_xmalloc = 0; static malloc_fun_t t_xmalloc = 0;
...@@ -120,7 +121,7 @@ set_max(uint64_t sum_used, uint64_t sum_freed) { ...@@ -120,7 +121,7 @@ set_max(uint64_t sum_used, uint64_t sum_freed) {
do { do {
old_max = status.max_in_use; old_max = status.max_in_use;
} while (old_max < in_use && } while (old_max < in_use &&
!__sync_bool_compare_and_swap(&status.max_in_use, old_max, in_use)); !toku_sync_bool_compare_and_swap(&status.max_in_use, old_max, in_use));
} }
} }
...@@ -150,13 +151,13 @@ toku_malloc(size_t size) { ...@@ -150,13 +151,13 @@ toku_malloc(size_t size) {
TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147 TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
if (toku_memory_do_stats) { if (toku_memory_do_stats) {
size_t used = my_malloc_usable_size(p); size_t used = my_malloc_usable_size(p);
__sync_add_and_fetch(&status.malloc_count, 1); toku_sync_add_and_fetch(&status.malloc_count, 1);
__sync_add_and_fetch(&status.requested,size); toku_sync_add_and_fetch(&status.requested,size);
__sync_add_and_fetch(&status.used, used); toku_sync_add_and_fetch(&status.used, used);
set_max(status.used, status.freed); set_max(status.used, status.freed);
} }
} else { } else {
__sync_add_and_fetch(&status.malloc_fail, 1); toku_sync_add_and_fetch(&status.malloc_fail, 1);
} }
return p; return p;
} }
...@@ -176,14 +177,14 @@ toku_realloc(void *p, size_t size) { ...@@ -176,14 +177,14 @@ toku_realloc(void *p, size_t size) {
if (q) { if (q) {
if (toku_memory_do_stats) { if (toku_memory_do_stats) {
size_t used = my_malloc_usable_size(q); size_t used = my_malloc_usable_size(q);
__sync_add_and_fetch(&status.realloc_count, 1); toku_sync_add_and_fetch(&status.realloc_count, 1);
__sync_add_and_fetch(&status.requested, size); toku_sync_add_and_fetch(&status.requested, size);
__sync_add_and_fetch(&status.used, used); toku_sync_add_and_fetch(&status.used, used);
__sync_add_and_fetch(&status.freed, used_orig); toku_sync_add_and_fetch(&status.freed, used_orig);
set_max(status.used, status.freed); set_max(status.used, status.freed);
} }
} else { } else {
__sync_add_and_fetch(&status.realloc_fail, 1); toku_sync_add_and_fetch(&status.realloc_fail, 1);
} }
return q; return q;
} }
...@@ -205,8 +206,8 @@ toku_free(void *p) { ...@@ -205,8 +206,8 @@ toku_free(void *p) {
if (p) { if (p) {
if (toku_memory_do_stats) { if (toku_memory_do_stats) {
size_t used = my_malloc_usable_size(p); size_t used = my_malloc_usable_size(p);
__sync_add_and_fetch(&status.free_count, 1); toku_sync_add_and_fetch(&status.free_count, 1);
__sync_add_and_fetch(&status.freed, used); toku_sync_add_and_fetch(&status.freed, used);
} }
if (t_free) if (t_free)
t_free(p); t_free(p);
...@@ -228,9 +229,9 @@ toku_xmalloc(size_t size) { ...@@ -228,9 +229,9 @@ toku_xmalloc(size_t size) {
TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147 TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
if (toku_memory_do_stats) { if (toku_memory_do_stats) {
size_t used = my_malloc_usable_size(p); size_t used = my_malloc_usable_size(p);
__sync_add_and_fetch(&status.malloc_count, 1); toku_sync_add_and_fetch(&status.malloc_count, 1);
__sync_add_and_fetch(&status.requested, size); toku_sync_add_and_fetch(&status.requested, size);
__sync_add_and_fetch(&status.used, used); toku_sync_add_and_fetch(&status.used, used);
set_max(status.used, status.freed); set_max(status.used, status.freed);
} }
return p; return p;
...@@ -252,10 +253,10 @@ toku_xrealloc(void *v, size_t size) { ...@@ -252,10 +253,10 @@ toku_xrealloc(void *v, size_t size) {
resource_assert(p); resource_assert(p);
if (toku_memory_do_stats) { if (toku_memory_do_stats) {
size_t used = my_malloc_usable_size(p); size_t used = my_malloc_usable_size(p);
__sync_add_and_fetch(&status.realloc_count, 1); toku_sync_add_and_fetch(&status.realloc_count, 1);
__sync_add_and_fetch(&status.requested, size); toku_sync_add_and_fetch(&status.requested, size);
__sync_add_and_fetch(&status.used, used); toku_sync_add_and_fetch(&status.used, used);
__sync_add_and_fetch(&status.freed, used_orig); toku_sync_add_and_fetch(&status.freed, used_orig);
set_max(status.used, status.freed); set_max(status.used, status.freed);
} }
return p; return p;
......
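The set_max loop above is the usual lock-free maximum update: re-read the current maximum and retry the compare-and-swap until either the observed maximum already covers our value or our swap lands. A standalone sketch of the same pattern using only the wrapper introduced in this commit (g_max and update_max are illustrative names):

#include <stdint.h>
#include <portability/toku_atomic.h>

static uint64_t g_max;  // tracks the largest value observed so far

static void update_max(uint64_t candidate) {
    uint64_t old;
    do {
        old = g_max;  // snapshot; another thread may raise it before our CAS
    } while (old < candidate &&
             !toku_sync_bool_compare_and_swap(&g_max, old, candidate));
}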
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#include "toku_os.h" #include "toku_os.h"
#include "toku_time.h" #include "toku_time.h"
#include "memory.h" #include "memory.h"
#include <portability/toku_atomic.h>
#include <util/partitioned_counter.h> #include <util/partitioned_counter.h>
int int
......
...@@ -5,6 +5,7 @@ if(BUILD_TESTING) ...@@ -5,6 +5,7 @@ if(BUILD_TESTING)
file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc) file(GLOB srcs RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.cc)
foreach(src ${srcs}) foreach(src ${srcs})
get_filename_component(base ${src} NAME_WE) get_filename_component(base ${src} NAME_WE)
set_property(SOURCE ${src} APPEND PROPERTY COMPILE_DEFINITIONS "__SRCFILE__=\"${CMAKE_CURRENT_SOURCE_DIR}/${src}\"")
list(APPEND tests ${base}) list(APPEND tests ${base})
endforeach(src) endforeach(src)
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
#ident "Copyright (c) 2007-2012 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <config.h>
#include <memory.h>
#include <portability/toku_atomic.h>
#include "test.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
int verbose = 0;
static const size_t cachelinesize = 64;
// A cache line is 64 bytes.
// Nine packed 7-byte structs fill the first 63 bytes; the tenth spans the last
// byte of the first cache line and the first six bytes of the next.
// We first sync_fetch_and_add (SFAA) the first 9 structs and check that we
// don't crash, then install a SIGABRT handler, SFAA the 10th (whose uint32_t
// straddles the boundary), and check that we do crash under paranoid asserts.
struct unpackedsevenbytestruct {
uint32_t i;
char pad[3];
};
struct __attribute__((packed)) packedsevenbytestruct {
uint32_t i;
char pad[3];
};
struct packedsevenbytestruct *psevenbytestructs;
static __attribute__((__noreturn__)) void catch_abort (int sig __attribute__((__unused__))) {
toku_free(psevenbytestructs);
#ifdef TOKU_DEBUG_PARANOID
exit(EXIT_SUCCESS); // with paranoid asserts, we expect to assert and reach this handler
#else
exit(EXIT_FAILURE); // we should not have crashed without paranoid asserts
#endif
}
int test_main(int UU(argc), char *const argv[] UU()) {
if (sizeof(unpackedsevenbytestruct) != 8) {
exit(EXIT_FAILURE);
}
if (sizeof(packedsevenbytestruct) != 7) {
exit(EXIT_FAILURE);
}
{
struct unpackedsevenbytestruct *usevenbytestructs;
int r = posix_memalign((void **) &usevenbytestructs, cachelinesize, sizeof(unpackedsevenbytestruct) * 10);
if (r) {
// this test is supposed to crash, so exiting cleanly is a failure
perror("posix_memalign");
exit(EXIT_FAILURE);
}
for (int idx = 0; idx < 10; ++idx) {
usevenbytestructs[idx].i = idx + 1;
(void) toku_sync_fetch_and_add(&usevenbytestructs[idx].i, 32U - idx);
}
toku_free(usevenbytestructs);
}
int r = posix_memalign((void **) &psevenbytestructs, cachelinesize, sizeof(packedsevenbytestruct) * 10);
if (r) {
// this test is supposed to crash, so exiting cleanly is a failure
perror("posix_memalign");
exit(EXIT_FAILURE);
}
for (int idx = 0; idx < 9; ++idx) {
psevenbytestructs[idx].i = idx + 1;
(void) toku_sync_fetch_and_add(&psevenbytestructs[idx].i, 32U - idx);
}
psevenbytestructs[9].i = 10;
signal(SIGABRT, catch_abort);
(void) toku_sync_fetch_and_add(&psevenbytestructs[9].i, 32U);
#ifdef TOKU_DEBUG_PARANOID
exit(EXIT_FAILURE); // with paranoid asserts, we should already have crashed
#else
exit(EXIT_SUCCESS); // without them, we should make it here
#endif
}
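For reference, a minimal standalone check of the layout arithmetic the test depends on (assuming the 64-byte line size used above): the tenth packed struct's uint32_t begins at byte 9 * 7 = 63, so a 4-byte access touches bytes 63 through 66 and straddles two cache lines.

#include <cassert>
#include <cstddef>
#include <cstdint>

int main(void) {
    const size_t line = 64;                   // assumed cache line size
    const size_t off = 9 * 7;                 // offset of the tenth packed struct: 63
    const size_t first_line = off / line;     // line holding the first byte -> 0
    const size_t last_line = (off + sizeof(uint32_t) - 1) / line;  // line holding the last byte -> 1
    assert(first_line != last_line);          // the 4-byte field crosses the boundary
    return 0;
}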
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <errno.h> #include <errno.h>
#include <util/rwlock.h> #include <util/rwlock.h>
#include <util/frwlock.h> #include <util/frwlock.h>
#include <portability/toku_atomic.h>
#include "toku_fair_rwlock.h" #include "toku_fair_rwlock.h"
#include <sys/types.h> #include <sys/types.h>
...@@ -138,8 +139,8 @@ void time_cas (void) { ...@@ -138,8 +139,8 @@ void time_cas (void) {
for (int t=0; t<T; t++) { for (int t=0; t<T; t++) {
gettimeofday(&start, NULL); gettimeofday(&start, NULL);
for (int i=0; i<N; i++) { for (int i=0; i<N; i++) {
{ int r = __sync_val_compare_and_swap(&myval, 0, 1); assert(r==0); } { int r = toku_sync_val_compare_and_swap(&myval, 0, 1); assert(r==0); }
{ int r = __sync_val_compare_and_swap(&myval, 1, 0); assert(r==1); } { int r = toku_sync_val_compare_and_swap(&myval, 1, 0); assert(r==1); }
} }
gettimeofday(&end, NULL); gettimeofday(&end, NULL);
double diff = 1e9*toku_tdiff(&end, &start)/N; double diff = 1e9*toku_tdiff(&end, &start)/N;
...@@ -325,7 +326,7 @@ static int log_counter=0; ...@@ -325,7 +326,7 @@ static int log_counter=0;
static void logit (int threadid, int loopid, char action) { static void logit (int threadid, int loopid, char action) {
//printf("%d %d %c\n", threadid, loopid, action); //printf("%d %d %c\n", threadid, loopid, action);
int my_log_counter = __sync_fetch_and_add(&log_counter, 1); int my_log_counter = toku_sync_fetch_and_add(&log_counter, 1);
assert(my_log_counter<N_LOG_ENTRIES); assert(my_log_counter<N_LOG_ENTRIES);
actionlog[my_log_counter].threadid = threadid; actionlog[my_log_counter].threadid = threadid;
actionlog[my_log_counter].loopid = loopid; actionlog[my_log_counter].loopid = loopid;
......
...@@ -11,12 +11,12 @@ ...@@ -11,12 +11,12 @@
#include "toku_portability.h" #include "toku_portability.h"
int main(void) { int main(void) {
int fd = toku_os_lock_file(__FILE__); int fd = toku_os_lock_file(__SRCFILE__);
assert(fd != -1); assert(fd != -1);
pid_t pid = fork(); pid_t pid = fork();
assert(pid != -1); assert(pid != -1);
if (pid == 0) { if (pid == 0) {
int fd2 = toku_os_lock_file(__FILE__); int fd2 = toku_os_lock_file(__SRCFILE__);
assert(fd2 == -1); assert(fd2 == -1);
return 0; return 0;
} else { } else {
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "Copyright (c) 2012 Tokutek Inc. All rights reserved."
#ident "$Id$"
#ifndef TOKU_ATOMIC_H
#define TOKU_ATOMIC_H
#include <config.h>
#include <toku_assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
__attribute__((const, always_inline))
static inline intptr_t which_cache_line(intptr_t addr) {
static const size_t assumed_cache_line_size = 64;
return addr / assumed_cache_line_size;
}
template <typename T> __attribute__((const, always_inline))
static inline bool crosses_boundary(T *addr, size_t width) {
const intptr_t int_addr = reinterpret_cast<intptr_t>(addr);
const intptr_t last_byte = int_addr + width - 1;
return which_cache_line(int_addr) != which_cache_line(last_byte);
}
template <typename T, typename U> __attribute__((always_inline))
static inline T toku_sync_fetch_and_add(T *addr, U diff) {
paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
return __sync_fetch_and_add(addr, diff);
}
template <typename T, typename U> __attribute__((always_inline))
static inline T toku_sync_add_and_fetch(T *addr, U diff) {
paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
return __sync_add_and_fetch(addr, diff);
}
template <typename T, typename U> __attribute__((always_inline))
static inline T toku_sync_fetch_and_sub(T *addr, U diff) {
paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
return __sync_fetch_and_sub(addr, diff);
}
template <typename T, typename U> __attribute__((always_inline))
static inline T toku_sync_sub_and_fetch(T *addr, U diff) {
paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
return __sync_sub_and_fetch(addr, diff);
}
template <typename T, typename U, typename V> __attribute__((always_inline))
static inline T toku_sync_val_compare_and_swap(T *addr, U oldval, V newval) {
paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
return __sync_val_compare_and_swap(addr, oldval, newval);
}
template <typename T, typename U, typename V> __attribute__((always_inline))
static inline bool toku_sync_bool_compare_and_swap(T *addr, U oldval, V newval) {
paranoid_invariant(!crosses_boundary(addr, sizeof *addr));
return __sync_bool_compare_and_swap(addr, oldval, newval);
}
// in case you include this but not toku_portability.h
#pragma GCC poison __sync_fetch_and_add
#pragma GCC poison __sync_fetch_and_sub
#pragma GCC poison __sync_fetch_and_or
#pragma GCC poison __sync_fetch_and_and
#pragma GCC poison __sync_fetch_and_xor
#pragma GCC poison __sync_fetch_and_nand
#pragma GCC poison __sync_add_and_fetch
#pragma GCC poison __sync_sub_and_fetch
#pragma GCC poison __sync_or_and_fetch
#pragma GCC poison __sync_and_and_fetch
#pragma GCC poison __sync_xor_and_fetch
#pragma GCC poison __sync_nand_and_fetch
#pragma GCC poison __sync_bool_compare_and_swap
#pragma GCC poison __sync_val_compare_and_swap
#pragma GCC poison __sync_synchronize
#pragma GCC poison __sync_lock_test_and_set
#pragma GCC poison __sync_lock_release
#endif // TOKU_ATOMIC_H
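A small usage sketch of the wrappers defined above (counter and bump are illustrative names): any naturally aligned word works, since an aligned operand can never straddle a 64-byte line.

#include <stdint.h>
#include <portability/toku_atomic.h>

static uint64_t counter;  // naturally aligned, so it cannot cross a cache line

static void bump(void) {
    // Same semantics as __sync_fetch_and_add(&counter, 1), but paranoid builds
    // first assert that the operand does not span a cache-line boundary.
    (void) toku_sync_fetch_and_add(&counter, 1);
}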
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <stdio.h> #include <stdio.h>
#include <memory.h> #include <memory.h>
#include <portability/toku_atomic.h>
struct toku_fair_rwlock_waiter_state { struct toku_fair_rwlock_waiter_state {
char is_read; char is_read;
...@@ -80,7 +81,7 @@ static __thread int tid=-1; ...@@ -80,7 +81,7 @@ static __thread int tid=-1;
static int next_tid=0; static int next_tid=0;
static int get_tid (void) { static int get_tid (void) {
if (tid==-1) { if (tid==-1) {
tid = __sync_fetch_and_add(&next_tid, 1); tid = toku_sync_fetch_and_add(&next_tid, 1);
} }
return tid; return tid;
} }
...@@ -108,10 +109,10 @@ int toku_fair_rwlock_rdlock_slow (toku_fair_rwlock_t *rwlock) { ...@@ -108,10 +109,10 @@ int toku_fair_rwlock_rdlock_slow (toku_fair_rwlock_t *rwlock) {
if (s_get_qcount(s)==0 && !s_get_wlock(s)) goto C2; if (s_get_qcount(s)==0 && !s_get_wlock(s)) goto C2;
else goto C3; else goto C3;
C2: C2:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_incr_rcount(s))) goto MU; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_incr_rcount(s))) goto MU;
else goto R2; else goto R2;
C3: C3:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_incr_qcount(s))) goto E; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_incr_qcount(s))) goto E;
else goto R2; else goto R2;
E: E:
// Put me into the queue. // Put me into the queue.
...@@ -145,7 +146,7 @@ int toku_fair_rwlock_rdlock_slow (toku_fair_rwlock_t *rwlock) { ...@@ -145,7 +146,7 @@ int toku_fair_rwlock_rdlock_slow (toku_fair_rwlock_t *rwlock) {
s = rwlock->state; s = rwlock->state;
goto C4; goto C4;
C4: C4:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_incr_rcount(s_decr_qcount(s)))) goto MU; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_incr_rcount(s_decr_qcount(s)))) goto MU;
else goto R4; else goto R4;
MU: MU:
toku_mutex_unlock(&rwlock->mutex); toku_mutex_unlock(&rwlock->mutex);
...@@ -168,11 +169,11 @@ int toku_fair_rwlock_wrlock_slow (toku_fair_rwlock_t *rwlock) { ...@@ -168,11 +169,11 @@ int toku_fair_rwlock_wrlock_slow (toku_fair_rwlock_t *rwlock) {
if (s_get_qcount(s)==0 && !s_get_wlock(s) && s_get_rcount(s)==0) goto C2; if (s_get_qcount(s)==0 && !s_get_wlock(s) && s_get_rcount(s)==0) goto C2;
else goto C3; else goto C3;
C2: C2:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_set_wlock(s))) goto MU; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_set_wlock(s))) goto MU;
else goto R2; else goto R2;
C3: C3:
L(C3); L(C3);
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_incr_qcount(s))) goto E; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_incr_qcount(s))) goto E;
else goto R2; else goto R2;
E: E:
LP(E, rwlock->state); LP(E, rwlock->state);
...@@ -202,7 +203,7 @@ int toku_fair_rwlock_wrlock_slow (toku_fair_rwlock_t *rwlock) { ...@@ -202,7 +203,7 @@ int toku_fair_rwlock_wrlock_slow (toku_fair_rwlock_t *rwlock) {
assert(!s_get_wlock(s)); assert(!s_get_wlock(s));
goto C4; goto C4;
C4: C4:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_set_wlock(s_decr_qcount(s)))) goto MU; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_set_wlock(s_decr_qcount(s)))) goto MU;
else goto R4; else goto R4;
MU: MU:
toku_mutex_unlock(&rwlock->mutex); toku_mutex_unlock(&rwlock->mutex);
...@@ -223,11 +224,11 @@ int toku_fair_rwlock_unlock_r_slow (toku_fair_rwlock_t *rwlock) { ...@@ -223,11 +224,11 @@ int toku_fair_rwlock_unlock_r_slow (toku_fair_rwlock_t *rwlock) {
if (s_get_rcount(s)>1 || s_get_qcount(s)==0) goto C2; if (s_get_rcount(s)>1 || s_get_qcount(s)==0) goto C2;
else goto C3; else goto C3;
C2: C2:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_decr_rcount(s))) goto MU; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_decr_rcount(s))) goto MU;
else goto R2; else goto R2;
C3: C3:
// rcount==1 and qcount>0 // rcount==1 and qcount>0
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_decr_rcount(s))) goto WN; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_decr_rcount(s))) goto WN;
else goto R2; else goto R2;
WN: WN:
LP(WN, rwlock->state); LP(WN, rwlock->state);
...@@ -253,10 +254,10 @@ int toku_fair_rwlock_unlock_w_slow (toku_fair_rwlock_t *rwlock) { ...@@ -253,10 +254,10 @@ int toku_fair_rwlock_unlock_w_slow (toku_fair_rwlock_t *rwlock) {
if (s_get_qcount(s)==0) goto C2; if (s_get_qcount(s)==0) goto C2;
else goto C3; else goto C3;
C2: C2:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_clear_wlock(s))) goto MU; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_clear_wlock(s))) goto MU;
else goto R2; else goto R2;
C3: C3:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_clear_wlock(s))) goto WN; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_clear_wlock(s))) goto WN;
else goto R2; else goto R2;
WN: WN:
LP(WN, rwlock->state); LP(WN, rwlock->state);
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it." #ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "toku_pthread.h" #include "toku_pthread.h"
#include <portability/toku_atomic.h>
// Fair readers/writer locks. These are fair (meaning first-come first-served. No reader starvation, and no writer starvation). And they are // Fair readers/writer locks. These are fair (meaning first-come first-served. No reader starvation, and no writer starvation). And they are
// probably faster than the linux readers/writer locks (pthread_rwlock_t). // probably faster than the linux readers/writer locks (pthread_rwlock_t).
...@@ -84,7 +85,7 @@ static inline int toku_fair_rwlock_rdlock (toku_fair_rwlock_t *rwlock) { ...@@ -84,7 +85,7 @@ static inline int toku_fair_rwlock_rdlock (toku_fair_rwlock_t *rwlock) {
//if (s_get_qcount(s)==0 && !s_get_wlock(s)) goto C1; //if (s_get_qcount(s)==0 && !s_get_wlock(s)) goto C1;
else goto ML; else goto ML;
C1: C1:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_incr_rcount(s))) goto DONE; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_incr_rcount(s))) goto DONE;
else goto START; else goto START;
DONE: DONE:
return 0; return 0;
...@@ -102,7 +103,7 @@ static inline int toku_fair_rwlock_wrlock (toku_fair_rwlock_t *rwlock) { ...@@ -102,7 +103,7 @@ static inline int toku_fair_rwlock_wrlock (toku_fair_rwlock_t *rwlock) {
if (s_get_qcount(s)==0 && !s_get_wlock(s) && s_get_rcount(s)==0) goto C1; if (s_get_qcount(s)==0 && !s_get_wlock(s) && s_get_rcount(s)==0) goto C1;
else goto ML; else goto ML;
C1: C1:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_set_wlock(s))) goto DONE; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_set_wlock(s))) goto DONE;
else goto START; else goto START;
DONE: DONE:
return 0; return 0;
...@@ -125,7 +126,7 @@ static inline int toku_fair_rwlock_unlock (toku_fair_rwlock_t *rwlock) { ...@@ -125,7 +126,7 @@ static inline int toku_fair_rwlock_unlock (toku_fair_rwlock_t *rwlock) {
if (s_get_qcount(s)==0) goto wC1; if (s_get_qcount(s)==0) goto wC1;
else goto wML; else goto wML;
wC1: wC1:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_clear_wlock(s))) goto wDONE; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_clear_wlock(s))) goto wDONE;
else goto wSTART; else goto wSTART;
wDONE: wDONE:
return 0; return 0;
...@@ -140,7 +141,7 @@ static inline int toku_fair_rwlock_unlock (toku_fair_rwlock_t *rwlock) { ...@@ -140,7 +141,7 @@ static inline int toku_fair_rwlock_unlock (toku_fair_rwlock_t *rwlock) {
if (s_get_rcount(s)>1 || s_get_qcount(s)==0) goto rC1; if (s_get_rcount(s)>1 || s_get_qcount(s)==0) goto rC1;
else goto rML; else goto rML;
rC1: rC1:
if (__sync_bool_compare_and_swap(&rwlock->state, s, s_decr_rcount(s))) goto rDONE; if (toku_sync_bool_compare_and_swap(&rwlock->state, s, s_decr_rcount(s))) goto rDONE;
else goto rSTART; else goto rSTART;
rDONE: rDONE:
return 0; return 0;
......
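For orientation, a sketch of the fast-path API shown above; initialization and teardown (toku_fair_rwlock_init/destroy here) are assumed to exist elsewhere in the header and are not part of this diff:

static toku_fair_rwlock_t lock;  // assume toku_fair_rwlock_init(&lock) ran at startup

static void read_shared_state(void) {
    toku_fair_rwlock_rdlock(&lock);   // CAS fast path; falls back to the mutex-guarded
                                      // queue (the *_slow functions) under contention
    // ... read shared state ...
    toku_fair_rwlock_unlock(&lock);
}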
This diff is collapsed.
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include "test.h" #include "test.h"
static size_t max(size_t a, size_t b) { static inline size_t max(size_t a, size_t b) {
return a > b ? a : b; return a > b ? a : b;
} }
......
This diff is collapsed.