Commit ad426894 authored by Yoni Fogel

[t:4905] closes #4905 Merge 4905b branch to main (removes pwrite lock)

git-svn-id: file:///svn/toku/tokudb@44315 c7de825b-a66e-492c-adef-691d508d4ae1
parent b10d5a7d
@@ -80,7 +80,7 @@ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -g3 -ggdb -O0")
 set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG FORTIFY_SOURCE=2)
 ## set extra release flags, we overwrite this because the default passes -DNDEBUG and we don't want that
-set(CMAKE_C_FLAGS_RELEASE "-O3")
+set(CMAKE_C_FLAGS_RELEASE "-g3 -ggdb -O3")
 ## check how to do inter-procedural optimization
 check_c_compiler_flag(-flto HAVE_CC_FLAG_FLTO)
......
@@ -40,7 +40,6 @@ set(FT_SOURCES
 ft_node-serialize.c
 ft-node-deserialize.c
 ft-ops.c
-ft-pwrite.c
 ft-serialize.c
 ft-test-helpers.c
 ft-verify.c
......
This diff is collapsed.
@@ -24,17 +24,17 @@ struct block_translation_pair {
 };
 void toku_blocktable_create_new(BLOCK_TABLE *btp);
-enum deserialize_error_code toku_blocktable_create_from_buffer(BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer);
+enum deserialize_error_code toku_blocktable_create_from_buffer(int fd, BLOCK_TABLE *btp, DISKOFF location_on_disk, DISKOFF size_on_disk, unsigned char *translation_buffer);
 void toku_blocktable_destroy(BLOCK_TABLE *btp);
 void toku_ft_lock(FT h);
 void toku_ft_unlock(FT h);
 void toku_block_translation_note_start_checkpoint_unlocked(BLOCK_TABLE bt);
-void toku_block_translation_note_end_checkpoint(BLOCK_TABLE bt, int fd, FT h);
+void toku_block_translation_note_end_checkpoint(BLOCK_TABLE bt, int fd);
 void toku_block_translation_note_failed_checkpoint(BLOCK_TABLE bt);
 void toku_block_translation_note_skipped_checkpoint(BLOCK_TABLE bt);
-void toku_maybe_truncate_cachefile_on_open(BLOCK_TABLE bt, int fd, FT h);
+void toku_maybe_truncate_file_on_open(BLOCK_TABLE bt, int fd);
 //Blocknums
 void toku_allocate_blocknum(BLOCK_TABLE bt, BLOCKNUM *res, FT h);
@@ -43,16 +43,16 @@ void toku_free_blocknum(BLOCK_TABLE bt, BLOCKNUM *b, FT h, BOOL for_checkpoint);
 void toku_verify_blocknum_allocated(BLOCK_TABLE bt, BLOCKNUM b);
 void toku_block_verify_no_data_blocks_except_root_unlocked(BLOCK_TABLE bt, BLOCKNUM root);
 void toku_block_verify_no_free_blocknums(BLOCK_TABLE bt);
-void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h);
+void toku_realloc_descriptor_on_disk(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h, int fd);
 void toku_realloc_descriptor_on_disk_unlocked(BLOCK_TABLE bt, DISKOFF size, DISKOFF *offset, FT h);
 void toku_get_descriptor_offset_size(BLOCK_TABLE bt, DISKOFF *offset, DISKOFF *size);
 //Blocks and Blocknums
-void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT h, BOOL for_checkpoint);
+void toku_blocknum_realloc_on_disk(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size, DISKOFF *offset, FT ft, int fd, BOOL for_checkpoint);
 void toku_translate_blocknum_to_offset_size(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF *offset, DISKOFF *size);
 //Serialization
-void toku_serialize_translation_to_wbuf(BLOCK_TABLE bt, struct wbuf *w, int64_t *address, int64_t *size);
+void toku_serialize_translation_to_wbuf(BLOCK_TABLE bt, int fd, struct wbuf *w, int64_t *address, int64_t *size);
 void toku_block_table_swap_for_redirect(BLOCK_TABLE old_bt, BLOCK_TABLE new_bt);
......
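Several of the prototypes above gain an int fd parameter: with the global pwrite lock gone, the block table must be able to preallocate space in the file it is assigning offsets for, so callers hand it the file descriptor directly. A minimal sketch of a caller after the change (the two toku_* calls are the real names from this diff; write_block itself is a made-up wrapper for illustration, not code from the tree):

// Hypothetical wrapper, for illustration only.
static void write_block(BLOCK_TABLE bt, BLOCKNUM b, DISKOFF size,
                        const void *buf, FT ft, int fd) {
    DISKOFF offset;
    // the block table may preallocate space in fd while picking an offset
    toku_blocknum_realloc_on_disk(bt, b, size, &offset, ft, fd, FALSE);
    // the write itself no longer needs a process-wide lock
    toku_os_full_pwrite(fd, buf, size, offset);
}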
@@ -756,16 +756,6 @@ toku_cachefile_get_fd (CACHEFILE cf) {
     return cf->fd;
 }
-int
-toku_cachefile_truncate (CACHEFILE cf, toku_off_t new_size) {
-    int r;
-    r = ftruncate(cf->fd, new_size);
-    if (r != 0) {
-        r = errno;
-    }
-    return r;
-}
 static CACHEFILE remove_cf_from_list_locked (CACHEFILE cf, CACHEFILE list) {
     if (list==0) return 0;
     else if (list==cf) {
@@ -1188,7 +1178,7 @@ static void cachetable_write_locked_pair(CACHETABLE ct, PAIR p) {
             cachetable_change_pair_attr(ct, old_attr, new_attr);
         }
     }
-    nb_mutex_write_unlock(&p->disk_nb_mutex);
+    nb_mutex_unlock(&p->disk_nb_mutex);
     // the pair is no longer dirty once written
     p->dirty = CACHETABLE_CLEAN;
@@ -1202,7 +1192,7 @@ static void cachetable_write_locked_pair(CACHETABLE ct, PAIR p) {
 static void cachetable_complete_write_pair (CACHETABLE ct, PAIR p, BOOL do_remove, BOOL* destroyed) {
     p->cq = 0;
-    nb_mutex_write_unlock(&p->value_nb_mutex);
+    nb_mutex_unlock(&p->value_nb_mutex);
     if (do_remove) {
         cachetable_maybe_remove_and_free_pair(ct, p, destroyed);
     }
@@ -1338,7 +1328,7 @@ static void do_partial_eviction(CACHETABLE ct, PAIR p) {
         workqueue_enq(p->cq, &p->asyncwork, 1);
     }
     else {
-        nb_mutex_write_unlock(&p->value_nb_mutex);
+        nb_mutex_unlock(&p->value_nb_mutex);
     }
 }
@@ -1437,7 +1427,7 @@ static void maybe_flush_some (CACHETABLE ct, long size) {
             // set up a completion queue.
             // So, a completion queue cannot exist
             assert(!curr_in_clock->cq);
-            nb_mutex_write_unlock(&curr_in_clock->value_nb_mutex);
+            nb_mutex_unlock(&curr_in_clock->value_nb_mutex);
         }
     }
     else {
@@ -1643,7 +1633,7 @@ static void checkpoint_cloned_pair(WORKITEM wi) {
         &new_attr,
         TRUE //is_clone
         );
-    nb_mutex_write_unlock(&p->disk_nb_mutex);
+    nb_mutex_unlock(&p->disk_nb_mutex);
     ct->n_checkpoint_clones_running--;
     if (ct->n_checkpoint_clones_running == 0) {
         toku_cond_broadcast(&ct->clones_background_wait);
@@ -1737,7 +1727,7 @@ write_pair_for_checkpoint_thread (CACHETABLE ct, PAIR p)
     // now release value_nb_mutex, before we write the PAIR out
     // so that the PAIR is available to client threads
-    nb_mutex_write_unlock(&p->value_nb_mutex); // didn't call cachetable_write_pair so we have to unlock it ourselves.
+    nb_mutex_unlock(&p->value_nb_mutex); // didn't call cachetable_write_pair so we have to unlock it ourselves.
     if (p->clone_callback) {
         // note that pending lock is not needed here because
         // we KNOW we are in the middle of a checkpoint
@@ -1750,7 +1740,7 @@ write_pair_for_checkpoint_thread (CACHETABLE ct, PAIR p)
         &attr,
         TRUE //is_clone
         );
-        nb_mutex_write_unlock(&p->disk_nb_mutex);
+        nb_mutex_unlock(&p->disk_nb_mutex);
     }
 }
 else {
@@ -1767,7 +1757,7 @@ write_pair_for_checkpoint_thread (CACHETABLE ct, PAIR p)
         workqueue_enq(p->cq, &p->asyncwork, 1);
     }
     else {
-        nb_mutex_write_unlock(&p->value_nb_mutex);
+        nb_mutex_unlock(&p->value_nb_mutex);
     }
 }
 }
@@ -1950,7 +1940,7 @@ do_partial_fetch(
     p->attr = new_attr;
     cachetable_change_pair_attr(ct, old_attr, new_attr);
     p->state = CTPAIR_IDLE;
-    nb_mutex_write_unlock(&p->disk_nb_mutex);
+    nb_mutex_unlock(&p->disk_nb_mutex);
     if (keep_pair_locked) {
         // if the caller wants the pair to remain locked
         // that means the caller requests continued
@@ -1964,7 +1954,7 @@ do_partial_fetch(
         workqueue_enq(p->cq, &p->asyncwork, 1);
     }
     else {
-        nb_mutex_write_unlock(&p->value_nb_mutex);
+        nb_mutex_unlock(&p->value_nb_mutex);
     }
 }
 }
@@ -1990,7 +1980,7 @@ void toku_cachetable_pf_pinned_pair(
     cachetable_unlock(cf->cachetable);
     pf_callback(value, p->disk_data, read_extraargs, fd, &attr);
     cachetable_lock(cf->cachetable);
-    nb_mutex_write_unlock(&p->disk_nb_mutex);
+    nb_mutex_unlock(&p->disk_nb_mutex);
     cachetable_unlock(cf->cachetable);
 }
@@ -2077,7 +2067,7 @@ static void cachetable_fetch_pair(
     p->attr = attr;
     cachetable_add_pair_attr(ct, attr);
     p->state = CTPAIR_IDLE;
-    nb_mutex_write_unlock(&p->disk_nb_mutex);
+    nb_mutex_unlock(&p->disk_nb_mutex);
     if (keep_pair_locked) {
         // if the caller wants the pair to remain locked
         // that means the caller requests continued
@@ -2091,7 +2081,7 @@ static void cachetable_fetch_pair(
         workqueue_enq(p->cq, &p->asyncwork, 1);
     }
     else {
-        nb_mutex_write_unlock(&p->value_nb_mutex);
+        nb_mutex_unlock(&p->value_nb_mutex);
     }
 }
 if (0) printf("%s:%d %"PRId64" complete\n", __FUNCTION__, __LINE__, key.b);
@@ -2374,7 +2364,7 @@ cachetable_unpin_internal(CACHEFILE cachefile, CACHEKEY key, u_int32_t fullhash,
     // So, we should assert that a completion queue does not
     // exist
     assert(!p->cq);
-    nb_mutex_write_unlock(&p->value_nb_mutex);
+    nb_mutex_unlock(&p->value_nb_mutex);
     if (dirty) p->dirty = CACHETABLE_DIRTY;
     if (attr.is_valid) {
         PAIR_ATTR old_attr = p->attr;
@@ -2528,7 +2518,7 @@ int toku_cachetable_get_and_pin_nonblocking (
         workqueue_enq(p->cq, &p->asyncwork, 1);
     }
     else {
-        nb_mutex_write_unlock(&p->value_nb_mutex);
+        nb_mutex_unlock(&p->value_nb_mutex);
     }
     cachetable_unlock(ct);
     return TOKUDB_TRY_AGAIN;
@@ -2686,7 +2676,7 @@ int toku_cachefile_prefetch(CACHEFILE cf, CACHEKEY key, u_int32_t fullhash,
     // sanity check, we already have an assert
     // before locking the PAIR
     assert(!p->cq);
-    nb_mutex_write_unlock(&p->value_nb_mutex);
+    nb_mutex_unlock(&p->value_nb_mutex);
     }
 }
 cachetable_unlock(ct);
@@ -2987,7 +2977,7 @@ static void cachetable_flush_cachefile(CACHETABLE ct, CACHEFILE cf) {
     PAIR p = workitem_arg(wi);
     p->cq = 0;
     //Some other thread owned the lock, but transferred ownership to the thread executing this function
-    nb_mutex_write_unlock(&p->value_nb_mutex); //Release the lock, no one has a pin, per our assumptions above.
+    nb_mutex_unlock(&p->value_nb_mutex); //Release the lock, no one has a pin, per our assumptions above.
     BOOL destroyed;
     cachetable_maybe_remove_and_free_pair(ct, p, &destroyed);
 }
@@ -3113,8 +3103,8 @@ int toku_cachetable_unpin_and_remove (
     // we must not have a completion queue
     // lying around, as we may create one now
     assert(!p->cq);
-    nb_mutex_write_unlock(&p->value_nb_mutex);
-    nb_mutex_write_unlock(&p->disk_nb_mutex);
+    nb_mutex_unlock(&p->value_nb_mutex);
+    nb_mutex_unlock(&p->disk_nb_mutex);
     //
     // As of Dr. Noga, only these threads may be
     // blocked waiting to lock this PAIR:
@@ -3193,7 +3183,7 @@ int toku_cachetable_unpin_and_remove (
     // make sure that our assumption is valid.
     assert(!p->checkpoint_pending);
     assert(p->attr.cache_pressure_size == 0);
-    nb_mutex_write_unlock(&p->value_nb_mutex);
+    nb_mutex_unlock(&p->value_nb_mutex);
     // Because we assume it is just the checkpoint thread
     // that may have been blocked (as argued above),
     // it is safe to simply remove the PAIR from the
@@ -3924,7 +3914,7 @@ toku_cleaner_thread (void *cachetable_v)
     // don't need to unlock it if the cleaner callback is called.
     if (!cleaner_callback_called) {
         assert(!best_pair->cq);
-        nb_mutex_write_unlock(&best_pair->value_nb_mutex);
+        nb_mutex_unlock(&best_pair->value_nb_mutex);
     }
     // We need to make sure the cachefile sticks around so a close
     // can't come destroy it. That's the purpose of this
......
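The cachetable hunks above all touch the same idiom; condensed, the unlock path looks like this (paraphrased from the context lines above, not new code in the commit):

if (p->cq) {
    // a waiter set up a completion queue: hand the PAIR over instead of unlocking
    workqueue_enq(p->cq, &p->asyncwork, 1);
}
else {
    // no waiter: just release the (now renamed) non-blocking mutex
    nb_mutex_unlock(&p->value_nb_mutex);
}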
@@ -430,9 +430,6 @@ void toku_cachefile_unlink_on_close(CACHEFILE cf);
 // is this cachefile marked as unlink on close?
 bool toku_cachefile_is_unlink_on_close(CACHEFILE cf);
-// Truncate a cachefile
-int toku_cachefile_truncate (CACHEFILE cf, toku_off_t new_size);
 // Return the logger associated with the cachefile
 TOKULOGGER toku_cachefile_logger (CACHEFILE);
......
@@ -3262,7 +3262,7 @@ ft_handle_open(FT_HANDLE ft_h, const char *fname_in_env, int is_create, int only
     //Opening a brt may restore to previous checkpoint. Truncate if necessary.
     {
         int fd = toku_cachefile_get_fd (ft->cf);
-        toku_maybe_truncate_cachefile_on_open(ft->blocktable, fd, ft);
+        toku_maybe_truncate_file_on_open(ft->blocktable, fd);
     }
     r = 0;
......
@@ -245,11 +245,12 @@ void toku_ft_layer_destroy(void);
 void toku_ft_serialize_layer_init(void);
 void toku_ft_serialize_layer_destroy(void);
-void toku_maybe_truncate_cachefile (CACHEFILE cf, int fd, u_int64_t size_used);
+void toku_maybe_truncate_file (int fd, uint64_t size_used, uint64_t expected_size, uint64_t *new_size);
 // Effect: truncate file if overallocated by at least 32MiB
-int maybe_preallocate_in_file (int fd, u_int64_t size) __attribute__ ((warn_unused_result));
-// Effect: If file size is less than SIZE, make it bigger by either doubling it or growing by 16MB whichever is less.
-// Return 0 on success, otherwise an error number.
+void toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size, int64_t *new_size);
+// Effect: make the file bigger by either doubling it or growing by 16MiB whichever is less, until it is at least size
 void toku_ft_suppress_recovery_logs (FT_HANDLE brt, TOKUTXN txn);
 // Effect: suppresses recovery logs
......
/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: ft-serialize.c 43686 2012-05-18 23:21:00Z leifwalsh $"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "includes.h"
#include "sort.h"
#include "threadpool.h"
#include "ft-pwrite.h"
#include <compress.h>
//TODO(fizzfaldt): determine if this is necessary AT ALL and try to delete
// This mutex protects pwrite from running in parallel, and also protects modifications to the block allocator.
static toku_mutex_t pwrite_mutex = { PTHREAD_MUTEX_INITIALIZER };
static int pwrite_is_locked=0;
void
toku_lock_for_pwrite(void) {
// Locks the pwrite_mutex.
toku_mutex_lock(&pwrite_mutex);
pwrite_is_locked = 1;
}
void
toku_unlock_for_pwrite(void) {
pwrite_is_locked = 0;
toku_mutex_unlock(&pwrite_mutex);
}
void
toku_full_pwrite_extend(int fd, const void *buf, size_t count, toku_off_t offset)
// requires that the pwrite has been locked
// On failure, this does not return (an assertion fails or something).
{
invariant(pwrite_is_locked);
{
int r = maybe_preallocate_in_file(fd, offset+count);
lazy_assert_zero(r);
}
toku_os_full_pwrite(fd, buf, count, offset);
}
/* -*- mode: C; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id: ft-serialize.c 43686 2012-05-18 23:21:00Z leifwalsh $"
#ident "Copyright (c) 2007-2010 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#ifndef FT_PWRITE_H
#define FT_PWRITE_H
void toku_lock_for_pwrite(void);
void toku_unlock_for_pwrite(void);
void toku_full_pwrite_extend(int fd, const void *buf, size_t count, toku_off_t offset);
#endif
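The two deleted files above (ft-pwrite.c and ft-pwrite.h) carried the process-wide pwrite lock that this commit removes. A simplified before/after of the write path, expressed as comments rather than literal tree code:

/* Before: every extending write was serialized by one global mutex.
 *
 *     toku_lock_for_pwrite();
 *     toku_full_pwrite_extend(fd, buf, count, offset);   // preallocate + pwrite
 *     toku_unlock_for_pwrite();
 *
 * After: the lock and the wrapper are gone. Preallocation moves into the
 * block allocator (which now receives fd), and callers write directly:
 *
 *     toku_os_full_pwrite(fd, buf, count, offset);
 */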
@@ -7,7 +7,6 @@
 #include "includes.h"
 #include "sort.h"
 #include "threadpool.h"
-#include "ft-pwrite.h"
 #include <compress.h>
 #if defined(HAVE_CILK)
@@ -62,10 +61,8 @@ toku_serialize_descriptor_contents_to_fd(int fd, const DESCRIPTOR desc, DISKOFF
     }
     lazy_assert(w.ndone==w.size);
     {
-        toku_lock_for_pwrite();
         //Actual Write translation table
-        toku_full_pwrite_extend(fd, w.buf, size, offset);
-        toku_unlock_for_pwrite();
+        toku_os_full_pwrite(fd, w.buf, size, offset);
     }
     toku_free(w.buf);
     return r;
@@ -106,10 +103,8 @@ deserialize_descriptor_from(int fd, BLOCK_TABLE bt, DESCRIPTOR desc, int layout_
     {
         XMALLOC_N(size, dbuf);
         {
-            toku_lock_for_pwrite();
             ssize_t r = toku_os_pread(fd, dbuf, size, offset);
             lazy_assert(r==size);
-            toku_unlock_for_pwrite();
         }
         {
             // check the checksum
@@ -199,7 +194,6 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
     //Load translation table
     {
-        toku_lock_for_pwrite();
         unsigned char *XMALLOC_N(translation_size_on_disk, tbuf);
         {
             // This cast is messed up in 32-bits if the block translation
@@ -209,9 +203,9 @@ deserialize_ft_versioned(int fd, struct rbuf *rb, FT *ftp, uint32_t version)
                                          translation_address_on_disk);
             lazy_assert(readsz == translation_size_on_disk);
         }
-        toku_unlock_for_pwrite();
         // Create table and read in data.
-        e = toku_blocktable_create_from_buffer(&ft->blocktable,
+        e = toku_blocktable_create_from_buffer(fd,
+                                               &ft->blocktable,
                                                translation_address_on_disk,
                                                translation_size_on_disk,
                                                tbuf);
@@ -712,7 +706,7 @@ int toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFILE
     int64_t address_translation;
     {
         //Must serialize translation first, to get address,size for header.
-        toku_serialize_translation_to_wbuf(blocktable, &w_translation,
+        toku_serialize_translation_to_wbuf(blocktable, fd, &w_translation,
                                            &address_translation,
                                            &size_translation);
         lazy_assert(size_translation==w_translation.size);
@@ -727,10 +721,9 @@ int toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFILE
         }
         lazy_assert(w_main.ndone==size_main);
     }
-    toku_lock_for_pwrite();
     {
         //Actual Write translation table
-        toku_full_pwrite_extend(fd, w_translation.buf,
-                                size_translation, address_translation);
+        toku_os_full_pwrite(fd, w_translation.buf,
+                            size_translation, address_translation);
     }
     {
@@ -751,11 +744,10 @@ int toku_serialize_ft_to (int fd, FT_HEADER h, BLOCK_TABLE blocktable, CACHEFILE
         // Beginning (0) or BLOCK_ALLOCATOR_HEADER_RESERVE
         toku_off_t main_offset;
         main_offset = (h->checkpoint_count & 0x1) ? 0 : BLOCK_ALLOCATOR_HEADER_RESERVE;
-        toku_full_pwrite_extend(fd, w_main.buf, w_main.ndone, main_offset);
+        toku_os_full_pwrite(fd, w_main.buf, w_main.ndone, main_offset);
     }
     }
     toku_free(w_main.buf);
     toku_free(w_translation.buf);
-    toku_unlock_for_pwrite();
     return rr;
 }
@@ -251,7 +251,7 @@ ft_end_checkpoint (CACHEFILE UU(cachefile), int fd, void *header_v) {
     int r = ft->panic;
     if (r==0) {
         assert(ft->h->type == FT_CURRENT);
-        toku_block_translation_note_end_checkpoint(ft->blocktable, fd, ft);
+        toku_block_translation_note_end_checkpoint(ft->blocktable, fd);
     }
     if (ft->checkpoint_header) { // could be NULL only if panic was true at begin_checkpoint
         toku_free(ft->checkpoint_header);
@@ -939,7 +939,7 @@ toku_update_descriptor(FT h, DESCRIPTOR d, int fd)
     int r = 0;
     DISKOFF offset;
     // 4 for checksum
-    toku_realloc_descriptor_on_disk(h->blocktable, toku_serialize_descriptor_size(d)+4, &offset, h);
+    toku_realloc_descriptor_on_disk(h->blocktable, toku_serialize_descriptor_size(d)+4, &offset, h, fd);
     r = toku_serialize_descriptor_contents_to_fd(fd, d, offset);
     if (r) {
         goto cleanup;
......
@@ -7,7 +7,6 @@
 #include "includes.h"
 #include "sort.h"
 #include "threadpool.h"
-#include "ft-pwrite.h"
 #include <compress.h>
 #if defined(HAVE_CILK)
@@ -80,60 +79,54 @@ toku_ft_serialize_layer_destroy(void) {
 enum {FILE_CHANGE_INCREMENT = (16<<20)};
-static inline u_int64_t
-alignup64(u_int64_t a, u_int64_t b) {
+static inline uint64_t
+alignup64(uint64_t a, uint64_t b) {
     return ((a+b-1)/b)*b;
 }
-//Race condition if ydb lock is split.
-//Ydb lock is held when this function is called.
-//Not going to truncate and delete (redirect to devnull) at same time.
+// safe_file_size_lock must be held.
 void
-toku_maybe_truncate_cachefile (CACHEFILE cf, int fd, u_int64_t size_used)
+toku_maybe_truncate_file (int fd, uint64_t size_used, uint64_t expected_size, uint64_t *new_sizep)
 // Effect: If file size >= SIZE+32MiB, reduce file size.
 // (32 instead of 16.. hysteresis).
 // Return 0 on success, otherwise an error number.
 {
-    //Check file size before taking pwrite lock to reduce likelihood of taking
-    //the pwrite lock needlessly.
-    //Check file size after taking lock to avoid race conditions.
     int64_t file_size;
     {
         int r = toku_os_get_file_size(fd, &file_size);
         lazy_assert_zero(r);
         invariant(file_size >= 0);
     }
+    invariant(expected_size == (uint64_t)file_size);
     // If file space is overallocated by at least 32M
-    if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
-        toku_lock_for_pwrite();
-        {
-            int r = toku_os_get_file_size(fd, &file_size);
-            lazy_assert_zero(r);
-            invariant(file_size >= 0);
-        }
-        if ((u_int64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
-            toku_off_t new_size = alignup64(size_used, (2*FILE_CHANGE_INCREMENT)); //Truncate to new size_used.
-            invariant(new_size < file_size);
-            int r = toku_cachefile_truncate(cf, new_size);
-            lazy_assert_zero(r);
-        }
-        toku_unlock_for_pwrite();
+    if ((uint64_t)file_size >= size_used + (2*FILE_CHANGE_INCREMENT)) {
+        toku_off_t new_size = alignup64(size_used, (2*FILE_CHANGE_INCREMENT)); //Truncate to new size_used.
+        invariant(new_size < file_size);
+        invariant(new_size >= 0);
+        int r = ftruncate(fd, new_size);
+        lazy_assert_zero(r);
+        *new_sizep = new_size;
+    }
+    else {
+        *new_sizep = file_size;
     }
     return;
 }
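Restating the shrink rule in isolation: the file grows in FILE_CHANGE_INCREMENT (16MiB) steps, but it is only truncated once it is overallocated by at least twice that, and it is cut back to a 32MiB-aligned size so the next growth step does not immediately re-extend it. A self-contained sketch of just that arithmetic (it mirrors the hunk above but is not the shipped function):

#include <stdint.h>

enum { FILE_CHANGE_INCREMENT = (16 << 20) };   // 16MiB, as in the diff

static inline uint64_t alignup64(uint64_t a, uint64_t b) {
    return ((a + b - 1) / b) * b;
}

// Returns the size the file would be truncated to, or file_size unchanged
// when it is not overallocated by at least 32MiB (the hysteresis band).
static int64_t maybe_truncated_size(int64_t file_size, uint64_t size_used) {
    if ((uint64_t)file_size >= size_used + 2 * FILE_CHANGE_INCREMENT) {
        return (int64_t)alignup64(size_used, 2 * FILE_CHANGE_INCREMENT);
    }
    return file_size;
}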
-static u_int64_t
-umin64(u_int64_t a, u_int64_t b) {
+static int64_t
+min64(int64_t a, int64_t b) {
     if (a<b) return a;
     return b;
 }
-int
-maybe_preallocate_in_file (int fd, u_int64_t size)
-// Effect: If file size is less than SIZE, make it bigger by either doubling it or growing by 16MiB whichever is less.
+void
+toku_maybe_preallocate_in_file (int fd, int64_t size, int64_t expected_size, int64_t *new_size)
+// Effect: make the file bigger by either doubling it or growing by 16MiB whichever is less, until it is at least size
 // Return 0 on success, otherwise an error number.
 {
     int64_t file_size;
+    //TODO(yoni): Allow variable stripe_width (perhaps from ft) for larger raids
+    const uint64_t stripe_width = 4096;
     {
         int r = toku_os_get_file_size(fd, &file_size);
         if (r != 0) { // debug #2463
@@ -143,16 +136,28 @@ maybe_preallocate_in_file (int fd, u_int64_t size)
             lazy_assert_zero(r);
         }
     invariant(file_size >= 0);
-    if ((u_int64_t)file_size < size) {
-        const int N = umin64(size, FILE_CHANGE_INCREMENT); // Double the size of the file, or add 16MiB, whichever is less.
-        char *MALLOC_N(N, wbuf);
-        memset(wbuf, 0, N);
-        toku_off_t start_write = alignup64(file_size, 4096);
+    invariant(expected_size == file_size);
+    // We want to double the size of the file, or add 16MiB, whichever is less.
+    // We emulate calling this function repeatedly until it satisfies the request.
+    int64_t to_write = 0;
+    if (file_size == 0) {
+        // Prevent infinite loop by starting with stripe_width as a base case.
+        to_write = stripe_width;
+    }
+    while (file_size + to_write < size) {
+        to_write += alignup64(min64(file_size + to_write, FILE_CHANGE_INCREMENT), stripe_width);
+    }
+    if (to_write > 0) {
+        char *XCALLOC_N(to_write, wbuf);
+        toku_off_t start_write = alignup64(file_size, stripe_width);
         invariant(start_write >= file_size);
-        toku_os_full_pwrite(fd, wbuf, N, start_write);
+        toku_os_full_pwrite(fd, wbuf, to_write, start_write);
         toku_free(wbuf);
+        *new_size = start_write + to_write;
+    }
+    else {
+        *new_size = file_size;
     }
-    return 0;
 }
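The growth rule is easier to see with concrete numbers. Here is a tiny standalone program that runs the same loop (an illustration of the arithmetic only, assuming the 4096-byte stripe_width used above; it is not code from the tree):

#include <stdint.h>
#include <stdio.h>

static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }
static int64_t alignup64(int64_t a, int64_t b) { return ((a + b - 1) / b) * b; }

int main(void) {
    const int64_t FILE_CHANGE_INCREMENT = 16 << 20;   // 16MiB
    const int64_t stripe_width = 4096;                 // value used in the diff
    int64_t file_size = 1 << 20;                       // pretend the file is 1MiB
    int64_t size = 40 << 20;                           // caller wants at least 40MiB
    int64_t to_write = (file_size == 0) ? stripe_width : 0;
    while (file_size + to_write < size) {
        // double, or add 16MiB, whichever is less; round up to the stripe width
        to_write += alignup64(min64(file_size + to_write, FILE_CHANGE_INCREMENT), stripe_width);
    }
    // 1MiB doubles to 2, 4, 8, 16, 32MiB, then one 16MiB step reaches 48MiB
    int64_t new_size = alignup64(file_size, stripe_width) + to_write;
    printf("new size = %lld MiB\n", (long long)(new_size >> 20));
    return 0;
}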
 // Don't include the sub_block header
@@ -897,10 +902,8 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA
     DISKOFF offset;
     toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset,
-                                  h, for_checkpoint); //dirties h
-    toku_lock_for_pwrite();
-    toku_full_pwrite_extend(fd, compressed_buf, n_to_write, offset);
-    toku_unlock_for_pwrite();
+                                  h, fd, for_checkpoint); //dirties h
+    toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset);
 }
 //printf("%s:%d wrote %d bytes for %lld size=%lld\n", __FILE__, __LINE__, w.ndone, off, size);
@@ -914,8 +917,8 @@ deserialize_child_buffer(NONLEAF_CHILDINFO bnc, struct rbuf *rbuf,
                          DESCRIPTOR desc, ft_compare_func cmp) {
     int r;
     int n_in_this_buffer = rbuf_int(rbuf);
-    void **fresh_offsets, **stale_offsets;
-    void **broadcast_offsets;
+    void **fresh_offsets = NULL, **stale_offsets = NULL;
+    void **broadcast_offsets = NULL;
     int nfresh = 0, nstale = 0;
     int nbroadcast_offsets = 0;
     if (cmp) {
@@ -1781,8 +1784,8 @@ deserialize_and_upgrade_internal_node(FTNODE node,
         NONLEAF_CHILDINFO bnc = BNC(node, i);
         int n_in_this_buffer = rbuf_int(rb);
-        void **fresh_offsets;
-        void **broadcast_offsets;
+        void **fresh_offsets = NULL;
+        void **broadcast_offsets = NULL;
         int nfresh = 0;
         int nbroadcast_offsets = 0;
@@ -2639,10 +2642,8 @@ toku_serialize_rollback_log_to (int fd, BLOCKNUM blocknum, ROLLBACK_LOG_NODE log
     lazy_assert(blocknum.b>=0);
     DISKOFF offset;
     toku_blocknum_realloc_on_disk(h->blocktable, blocknum, n_to_write, &offset,
-                                  h, for_checkpoint); //dirties h
-    toku_lock_for_pwrite();
-    toku_full_pwrite_extend(fd, compressed_buf, n_to_write, offset);
-    toku_unlock_for_pwrite();
+                                  h, fd, for_checkpoint); //dirties h
+    toku_os_full_pwrite(fd, compressed_buf, n_to_write, offset);
 }
 toku_free(compressed_buf);
 log->dirty = 0; // See #1957. Must set the node to be clean after serializing it so that it doesn't get written again on the next checkpoint or eviction.
......
@@ -52,7 +52,7 @@ static inline void nb_mutex_lock(NB_MUTEX nb_mutex,
 // release a write lock
 // expects: mutex is locked
-static inline void nb_mutex_write_unlock(NB_MUTEX nb_mutex) {
+static inline void nb_mutex_unlock(NB_MUTEX nb_mutex) {
     rwlock_write_unlock(&nb_mutex->lock);
 }
......
@@ -35,13 +35,13 @@ test_fifo_enq (int n) {
 // this was a function but icc cant handle it
 #define buildkey(len) { \
-    thekeylen = len; \
+    thekeylen = len+1; \
     thekey = toku_realloc(thekey, thekeylen); \
     memset(thekey, len, thekeylen); \
 }
 #define buildval(len) { \
-    thevallen = len+1; \
+    thevallen = len+2; \
     theval = toku_realloc(theval, thevallen); \
     memset(theval, ~len, thevallen); \
 }
......
@@ -341,6 +341,7 @@ test_prefetching(void) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -351,7 +352,7 @@ test_prefetching(void) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
......
@@ -284,6 +284,7 @@ test_serialize_nonleaf(void) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -294,7 +295,7 @@ test_serialize_nonleaf(void) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -375,6 +376,7 @@ test_serialize_leaf(void) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -385,7 +387,7 @@ test_serialize_leaf(void) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
......
@@ -57,7 +57,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
     const int nodesize = (1<<22);
     struct ftnode sn, *dn;
-    int fd = open(__SRCFILE__ ".ft_handle", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+    int fd = open(__SRCFILE__ ".ft", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
     int r;
@@ -116,6 +116,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
     brt_h->compare_fun = long_key_cmp;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -126,7 +127,7 @@ test_serialize_leaf(int valsize, int nelts, double entropy) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -186,7 +187,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
     const int nodesize = (1<<22);
     struct ftnode sn, *dn;
-    int fd = open(__SRCFILE__ ".ft_handle", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
+    int fd = open(__SRCFILE__ ".ft", O_RDWR|O_CREAT|O_BINARY, S_IRWXU|S_IRWXG|S_IRWXO); assert(fd >= 0);
     int r;
@@ -254,6 +255,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
     brt_h->compare_fun = long_key_cmp;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -264,7 +266,7 @@ test_serialize_nonleaf(int valsize, int nelts, double entropy) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
......
@@ -261,6 +261,8 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -271,7 +273,7 @@ test_serialize_leaf_check_msn(enum ftnode_verify_type bft, BOOL do_clone) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -408,6 +410,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -418,7 +421,7 @@ test_serialize_leaf_with_large_pivots(enum ftnode_verify_type bft, BOOL do_clone
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -552,6 +555,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, BOOL do_clone) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -562,7 +566,7 @@ test_serialize_leaf_with_many_rows(enum ftnode_verify_type bft, BOOL do_clone) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -701,6 +705,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, BOOL do_clone)
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -711,7 +716,7 @@ test_serialize_leaf_with_large_rows(enum ftnode_verify_type bft, BOOL do_clone)
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -866,6 +871,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, BOOL
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -876,7 +882,7 @@ test_serialize_leaf_with_empty_basement_nodes(enum ftnode_verify_type bft, BOOL
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -995,6 +1001,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -1005,7 +1012,7 @@ test_serialize_leaf_with_multiple_empty_basement_nodes(enum ftnode_verify_type b
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -1129,6 +1136,7 @@ test_serialize_leaf(enum ftnode_verify_type bft, BOOL do_clone) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -1139,7 +1147,7 @@ test_serialize_leaf(enum ftnode_verify_type bft, BOOL do_clone) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
@@ -1276,6 +1284,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, BOOL do_clone) {
     brt_h->panic = 0; brt_h->panic_string = 0;
     toku_ft_init_treelock(brt_h);
     toku_blocktable_create_new(&brt_h->blocktable);
+    { int r_truncate = ftruncate(fd, 0); CKERR(r_truncate); }
     //Want to use block #20
     BLOCKNUM b = make_blocknum(0);
     while (b.b < 20) {
@@ -1286,7 +1295,7 @@ test_serialize_nonleaf(enum ftnode_verify_type bft, BOOL do_clone) {
     {
         DISKOFF offset;
         DISKOFF size;
-        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, FALSE);
+        toku_blocknum_realloc_on_disk(brt_h->blocktable, b, 100, &offset, brt_h, fd, FALSE);
         assert(offset==BLOCK_ALLOCATOR_TOTAL_HEADER_RESERVE);
         toku_translate_blocknum_to_offset_size(brt_h->blocktable, b, &offset, &size);
......
@@ -387,7 +387,8 @@ int test_main (int argc, const char *argv[]) {
     char deletecmd[templen];
     int n = snprintf(deletecmd, templen, "rm -rf %s", directory);
     assert(n>0 && n<templen);
-    system(deletecmd);
+    r = system(deletecmd);
+    CKERR(r);
 }
 return 0;
......
@@ -16,7 +16,8 @@
 static void test_it (int N) {
     FT_HANDLE brt;
     int r;
-    system("rm -rf " TESTDIR);
+    r = system("rm -rf " TESTDIR);
+    CKERR(r);
     r = toku_os_mkdir(TESTDIR, S_IRWXU); CKERR(r);
     TOKULOGGER logger;
......
@@ -39,8 +39,9 @@ test_main (int argc __attribute__((__unused__)), const char *argv[] __attribute_
     assert(r==0);
 }
 {
-    int r = maybe_preallocate_in_file(fd, 1000);
-    assert(r==0);
+    int64_t size_after;
+    toku_maybe_preallocate_in_file(fd, 1000, file_size, &size_after);
+    assert(size_after == file_size);
 }
 int64_t file_size2;
 {
......
@@ -331,6 +331,7 @@ int toku_txn_manager_start_txn(
     }
     if (xid == TXNID_NONE) {
         LSN first_lsn;
+        invariant(logger);
         r = toku_log_xbegin(logger, &first_lsn, 0, parent ? parent->txnid64 : 0);
         assert_zero(r);
         xid = first_lsn.lsn;
......
@@ -9,6 +9,12 @@ if(BUILD_TESTING)
     set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES dir.${src}.test)
   endforeach(src)
+  include(CheckCCompilerFlag)
+  check_c_compiler_flag(-Wno-unused-result HAVE_WNO_UNUSED_RESULT)
+  if (HAVE_WNO_UNUSED_RESULT)
+    set_property(SOURCE try-leak-lost.c APPEND PROPERTY COMPILE_FLAGS -Wno-unused-result)
+  endif ()
   foreach(src ${srcs})
     get_filename_component(test ${src} NAME_WE)
......
@@ -17,7 +17,7 @@ create_files(int N, int fds[N]) {
     char name[30];
     for (i = 0; i < N; i++) {
         snprintf(name, sizeof(name), "%d", i);
-        fds[i] = open(name, O_CREAT|O_WRONLY);
+        fds[i] = open(name, O_CREAT|O_WRONLY, 0644);
         if (fds[i] < 0) {
             r = errno;
             CKERR(r);
......
@@ -67,7 +67,8 @@ int test_main(int argc, char * const argv[]) {
     const int size = 10+strlen(env_dir);
     char cmd[size];
     snprintf(cmd, size, "rm -rf %s", env_dir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
 }
 CHK(toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO));
......
@@ -188,7 +188,8 @@ unlink_dir (const char *dir) {
     int len = strlen(dir)+100;
     char cmd[len];
     snprintf(cmd, len, "rm -rf %s", dir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
 }
 int
......
@@ -13,7 +13,7 @@ static void setup_env (void) {
     const int len = strlen(envdir)+100;
     char cmd[len];
     snprintf(cmd, len, "rm -rf %s", envdir);
-    system(cmd);
+    {int r = system(cmd); CKERR(r); }
     {int r = toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO); CKERR(r); }
     {int r = db_env_create(&env, 0); CKERR(r); }
     //env->set_errfile(env, stderr);
......
@@ -9,7 +9,8 @@ static void clean_env (const char *envdir) {
     const int len = strlen(envdir)+100;
     char cmd[len];
     snprintf(cmd, len, "rm -rf %s", envdir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
     CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
 }
......
@@ -9,7 +9,8 @@ static void clean_env (const char *envdir) {
     const int len = strlen(envdir)+100;
     char cmd[len];
     snprintf(cmd, len, "rm -rf %s", envdir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
     CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
 }
......
@@ -9,7 +9,8 @@ static void clean_env (const char *envdir) {
     const int len = strlen(envdir)+100;
     char cmd[len];
     snprintf(cmd, len, "rm -rf %s", envdir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
     CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
 }
......
@@ -9,7 +9,8 @@ static void clean_env (const char *envdir) {
     const int len = strlen(envdir)+100;
     char cmd[len];
     snprintf(cmd, len, "rm -rf %s", envdir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
     CKERR(toku_os_mkdir(envdir, S_IRWXU+S_IRWXG+S_IRWXO));
 }
......
@@ -105,7 +105,8 @@ int test_main(int argc, char * const argv[]) {
     const int size = 10+strlen(env_dir);
     char cmd[size];
     snprintf(cmd, size, "rm -rf %s", env_dir);
-    system(cmd);
+    int r = system(cmd);
+    CKERR(r);
 }
 CHK(toku_os_mkdir(env_dir, S_IRWXU+S_IRWXG+S_IRWXO));
 const int envflags = DB_INIT_MPOOL|DB_CREATE|DB_THREAD |DB_INIT_LOCK|DB_INIT_LOG|DB_INIT_TXN|DB_PRIVATE;
......
@@ -39,7 +39,7 @@ initialize_values (void) {
     }
     u_int32_t len = random() % MAX_SIZE;
     fillrandom(keybuf, len);
-    dbt_init(&vals[nest_level], &keybuf[0], len);
+    dbt_init(&key, &keybuf[0], len);
 }
......
@@ -61,7 +61,7 @@ initialize_values (void) {
     }
     u_int32_t len = random() % MAX_SIZE;
     fillrandom(keybuf, len);
-    dbt_init(&vals[nest_level], &keybuf[0], len);
+    dbt_init(&key, &keybuf[0], len);
 }
......
@@ -68,7 +68,7 @@ initialize_values (void) {
     }
     u_int32_t len = random() % MAX_SIZE;
     fillrandom(keybuf, len);
-    dbt_init(&vals[nest_level], &keybuf[0], len);
+    dbt_init(&key, &keybuf[0], len);
 }
......
@@ -72,7 +72,7 @@ initialize_values (void) {
     }
     u_int32_t len = random() % MAX_SIZE;
     fillrandom(keybuf, len);
-    dbt_init(&vals[nest_level], &keybuf[0], len);
+    dbt_init(&key, &keybuf[0], len);
     fillrandom(junkvalbuf, MAX_SIZE-1);
     dbt_init(&junkval, &junkvalbuf[0], MAX_SIZE-1);
......