Commit a2a18dd9 authored by Sergei Golubchik

tokudb-7.5.3

......@@ -43,7 +43,12 @@ IF(NOT DEFINED TOKUDB_VERSION)
ENDIF()
ENDIF()
IF(DEFINED TOKUDB_VERSION)
ADD_DEFINITIONS("-DTOKUDB_VERSION=\"${TOKUDB_VERSION}\"")
ADD_DEFINITIONS("-DTOKUDB_VERSION=${TOKUDB_VERSION}")
IF (${TOKUDB_VERSION} MATCHES "^tokudb-([0-9]+)\\.([0-9]+)\\.([0-9]+.*)")
ADD_DEFINITIONS("-DTOKUDB_VERSION_MAJOR=${CMAKE_MATCH_1}")
ADD_DEFINITIONS("-DTOKUDB_VERSION_MINOR=${CMAKE_MATCH_2}")
ADD_DEFINITIONS("-DTOKUDB_VERSION_PATCH=${CMAKE_MATCH_3}")
ENDIF()
ENDIF()
IF(DEFINED TOKUDB_NOPATCH_CONFIG)
......
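For reference, the parsed version components arrive in the C++ code as plain preprocessor tokens via the `-D` definitions above. A minimal sketch of consuming them (the function and its output are illustrative, not part of the patch; it assumes the CMake regex matched, so the major/minor tokens are plain integers):

```cpp
// Illustrative consumer of the TOKUDB_VERSION_MAJOR/MINOR definitions
// added by the ADD_DEFINITIONS calls above.
#include <cstdio>

static void print_tokudb_version(void) {
#if defined(TOKUDB_VERSION_MAJOR) && defined(TOKUDB_VERSION_MINOR)
    std::printf("TokuDB %d.%d\n", TOKUDB_VERSION_MAJOR, TOKUDB_VERSION_MINOR);
#else
    std::printf("TokuDB version unknown\n");
#endif
}
```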
......@@ -24,14 +24,14 @@ working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage
engine, called `make.mysql.bash`. This script will download copies of the
needed source code from github and build everything.
To build MySQL 5.5.38 with TokuDB 7.1.7:
To build MySQL 5.5.39 with TokuDB 7.5.2:
```sh
scripts/make.mysql.bash --mysqlbuild=mysql-5.5.38-tokudb-7.1.7-linux-x86_64
scripts/make.mysql.bash --mysqlbuild=mysql-5.5.39-tokudb-7.5.2-linux-x86_64
```
To build MariaDB 5.5.38 with TokuDB 7.1.7:
To build MariaDB 5.5.39 with TokuDB 7.5.2:
```sh
scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.38-tokudb-7.1.7-linux-x86_64
scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.39-tokudb-7.5.2-linux-x86_64
```
Before you start, make sure you have a C++11-compatible compiler (GCC >=
......
......@@ -16,9 +16,7 @@
# This script merges many static libraries into
# one big library on Unix.
SET(TARGET_LOCATION "@TARGET_LOCATION@")
SET(TARGET "@TARGET@")
SET(STATIC_LIBS "@STATIC_LIBS@")
SET(CMAKE_CURRENT_BINARY_DIR "@CMAKE_CURRENT_BINARY_DIR@")
SET(CMAKE_AR "@CMAKE_AR@")
SET(CMAKE_RANLIB "@CMAKE_RANLIB@")
......@@ -30,7 +28,7 @@ MAKE_DIRECTORY(${TEMP_DIR})
# clashes) Since the lib may contain objects with the same name, we first
# list the archive contents, then uniquify the object names as we extract
# them.
FOREACH(LIB ${STATIC_LIBS})
FOREACH(LIB ${STATIC_LIB_FILES})
GET_FILENAME_COMPONENT(NAME_NO_EXT ${LIB} NAME_WE)
SET(TEMP_SUBDIR ${TEMP_DIR}/${NAME_NO_EXT})
MAKE_DIRECTORY(${TEMP_SUBDIR})
......@@ -87,10 +85,10 @@ FOREACH(OBJ ${OBJECTS})
SET(ALL_OBJECTS ${ALL_OBJECTS} ${OBJ})
ENDFOREACH()
FILE(TO_NATIVE_PATH ${TARGET_LOCATION} ${TARGET_LOCATION})
FILE(TO_NATIVE_PATH ${TARGET_FILE} TARGET_FILE)
# Now pack the objects into library with ar.
EXECUTE_PROCESS(
COMMAND ${CMAKE_AR} rcs ${TARGET_LOCATION} ${ALL_OBJECTS}
COMMAND ${CMAKE_AR} rcs ${TARGET_FILE} ${ALL_OBJECTS}
WORKING_DIRECTORY ${TEMP_DIR}
)
......
......@@ -27,17 +27,12 @@ MACRO(TOKU_MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
SET(OSLIBS)
FOREACH(LIB ${LIBS_TO_MERGE})
GET_TARGET_PROPERTY(LIB_LOCATION ${LIB} LOCATION)
GET_TARGET_PROPERTY(LIB_TYPE ${LIB} TYPE)
IF(NOT LIB_LOCATION)
# 3rd party library like libz.so. Make sure that everything
# that links to our library links to this one as well.
LIST(APPEND OSLIBS ${LIB})
ELSE()
IF(TARGET ${LIB})
# This is a target in current project
# (can be a static or shared lib)
GET_TARGET_PROPERTY(LIB_TYPE ${LIB} TYPE)
IF(LIB_TYPE STREQUAL "STATIC_LIBRARY")
SET(STATIC_LIBS ${STATIC_LIBS} ${LIB_LOCATION})
LIST(APPEND STATIC_LIBS ${LIB})
ADD_DEPENDENCIES(${TARGET} ${LIB})
# Extract dependent OS libraries
TOKU_GET_DEPENDEND_OS_LIBS(${LIB} LIB_OSLIBS)
......@@ -46,6 +41,10 @@ MACRO(TOKU_MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
# This is a shared library our static lib depends on.
LIST(APPEND OSLIBS ${LIB})
ENDIF()
ELSE()
# 3rd party library like libz.so. Make sure that everything
# that links to our library links to this one as well.
LIST(APPEND OSLIBS ${LIB})
ENDIF()
ENDFOREACH()
IF(OSLIBS)
......@@ -65,19 +64,21 @@ MACRO(TOKU_MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
# To merge libs, just pass them to lib.exe command line.
SET(LINKER_EXTRA_FLAGS "")
FOREACH(LIB ${STATIC_LIBS})
SET(LINKER_EXTRA_FLAGS "${LINKER_EXTRA_FLAGS} ${LIB}")
SET(LINKER_EXTRA_FLAGS "${LINKER_EXTRA_FLAGS} $<TARGET_FILE:${LIB}>")
ENDFOREACH()
SET_TARGET_PROPERTIES(${TARGET} PROPERTIES STATIC_LIBRARY_FLAGS
"${LINKER_EXTRA_FLAGS}")
ELSE()
GET_TARGET_PROPERTY(TARGET_LOCATION ${TARGET} LOCATION)
FOREACH(STATIC_LIB ${STATIC_LIBS})
LIST(APPEND STATIC_LIB_FILES $<TARGET_FILE:${STATIC_LIB}>)
ENDFOREACH()
IF(APPLE)
# Use OSX's libtool to merge archives (it handles universal
# binaries properly)
ADD_CUSTOM_COMMAND(TARGET ${TARGET} POST_BUILD
COMMAND rm ${TARGET_LOCATION}
COMMAND /usr/bin/libtool -static -o ${TARGET_LOCATION}
${STATIC_LIBS}
COMMAND rm $<TARGET_FILE:${TARGET}>
COMMAND /usr/bin/libtool -static -o $<TARGET_FILE:${TARGET}>
${STATIC_LIB_FILES}
)
ELSE()
# Generic Unix, Cygwin or MinGW. In post-build step, call
......@@ -88,11 +89,14 @@ MACRO(TOKU_MERGE_STATIC_LIBS TARGET OUTPUT_NAME LIBS_TO_MERGE)
${TOKU_CMAKE_SCRIPT_DIR}/merge_archives_unix.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
@ONLY
)
)
STRING(REGEX REPLACE ";" "\\\;" STATIC_LIB_FILES "${STATIC_LIB_FILES}")
ADD_CUSTOM_COMMAND(TARGET ${TARGET} POST_BUILD
COMMAND rm ${TARGET_LOCATION}
COMMAND ${CMAKE_COMMAND} -P
${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
COMMAND rm $<TARGET_FILE:${TARGET}>
COMMAND ${CMAKE_COMMAND}
-D TARGET_FILE=$<TARGET_FILE:${TARGET}>
-D STATIC_LIB_FILES="${STATIC_LIB_FILES}"
-P ${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/merge_archives_${TARGET}.cmake"
)
ENDIF()
......
......@@ -17,15 +17,22 @@ add_c_defines(
if (NOT CMAKE_SYSTEM_NAME STREQUAL FreeBSD)
## on FreeBSD these types of macros actually remove functionality
add_c_defines(
_SVID_SOURCE
_DEFAULT_SOURCE
_XOPEN_SOURCE=600
)
endif ()
## add TOKU_PTHREAD_DEBUG for debug builds
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG TOKU_PTHREAD_DEBUG=1)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD TOKU_PTHREAD_DEBUG=1)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD _FORTIFY_SOURCE=2)
if (CMAKE_VERSION VERSION_LESS 3.0)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG TOKU_PTHREAD_DEBUG=1)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD TOKU_PTHREAD_DEBUG=1)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DRD _FORTIFY_SOURCE=2)
else ()
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
$<$<OR:$<CONFIG:DEBUG>,$<CONFIG:DRD>>:TOKU_PTHREAD_DEBUG=1>
$<$<CONFIG:DRD>:_FORTIFY_SOURCE=2>
)
endif ()
## coverage
option(USE_GCOV "Use gcov for test coverage." OFF)
......@@ -215,7 +222,7 @@ function(maybe_add_gcov_to_libraries)
foreach(lib ${ARGN})
add_space_separated_property(TARGET ${lib} COMPILE_FLAGS --coverage)
add_space_separated_property(TARGET ${lib} LINK_FLAGS --coverage)
target_link_libraries(${lib} gcov)
target_link_libraries(${lib} LINK_PRIVATE gcov)
endforeach(lib)
endif (USE_GCOV)
endfunction(maybe_add_gcov_to_libraries)
......@@ -2516,7 +2516,7 @@ toku_cachetable_minicron_shutdown(CACHETABLE ct) {
void toku_cachetable_prepare_close(CACHETABLE ct UU()) {
extern bool toku_serialize_in_parallel;
toku_serialize_in_parallel = true;
toku_drd_unsafe_set(&toku_serialize_in_parallel, true);
}
/* Requires that it all be flushed. */
......
......@@ -602,8 +602,9 @@ handle_split_of_child(
// We never set the rightmost blocknum to be the root.
// Instead, we wait for the root to split and let promotion initialize the rightmost
// blocknum to be the first non-root leaf node on the right extreme to receive an insert.
invariant(ft->h->root_blocknum.b != ft->rightmost_blocknum.b);
if (childa->blocknum.b == ft->rightmost_blocknum.b) {
BLOCKNUM rightmost_blocknum = toku_drd_unsafe_fetch(&ft->rightmost_blocknum);
invariant(ft->h->root_blocknum.b != rightmost_blocknum.b);
if (childa->blocknum.b == rightmost_blocknum.b) {
// The rightmost leaf (a) split into (a) and (b). We want (b) to swap pair values
// with (a), now that it is the new rightmost leaf. This keeps the rightmost blocknum
// constant, the same the way we keep the root blocknum constant.
......@@ -1430,7 +1431,8 @@ ft_merge_child(
node->pivotkeys.delete_at(childnuma);
// Handle a merge of the rightmost leaf node.
if (did_merge && childb->blocknum.b == ft->rightmost_blocknum.b) {
BLOCKNUM rightmost_blocknum = toku_drd_unsafe_fetch(&ft->rightmost_blocknum);
if (did_merge && childb->blocknum.b == rightmost_blocknum.b) {
invariant(childb->blocknum.b != ft->h->root_blocknum.b);
toku_ftnode_swap_pair_values(childa, childb);
BP_BLOCKNUM(node, childnuma) = childa->blocknum;
......
......@@ -1202,7 +1202,7 @@ exit:
// We need a function to have something a drd suppression can reference
// see src/tests/drd.suppressions (unsafe_touch_clock)
static void unsafe_touch_clock(FTNODE node, int i) {
BP_TOUCH_CLOCK(node, i);
toku_drd_unsafe_set(&node->bp[i].clock_count, static_cast<unsigned char>(1));
}
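Giving the racy store its own named function, as above, is what lets a textual drd suppression match the access. The same trick generalizes; a sketch with an illustrative variable (`toku_drd_unsafe_set()` is defined elsewhere in this patch):

```cpp
// Sketch: wrap a benign racy write in a dedicated symbol so a valgrind/drd
// suppression can reference the function by name (as drd.suppressions
// references unsafe_touch_clock). `g_flag` is illustrative.
static unsigned char g_flag;

static void unsafe_touch_flag(void) {
    toku_drd_unsafe_set(&g_flag, static_cast<unsigned char>(1));
}
```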
// Callback that states if a partial fetch of the node is necessary
......@@ -1622,13 +1622,13 @@ static void inject_message_in_locked_node(
paranoid_invariant(msg_with_msn.msn().msn == node->max_msn_applied_to_node_on_disk.msn);
if (node->blocknum.b == ft->rightmost_blocknum.b) {
if (ft->seqinsert_score < FT_SEQINSERT_SCORE_THRESHOLD) {
if (toku_drd_unsafe_fetch(&ft->seqinsert_score) < FT_SEQINSERT_SCORE_THRESHOLD) {
// we promoted to the rightmost leaf node and the seqinsert score has not yet saturated.
toku_sync_fetch_and_add(&ft->seqinsert_score, 1);
}
} else if (ft->seqinsert_score != 0) {
} else if (toku_drd_unsafe_fetch(&ft->seqinsert_score) != 0) {
// we promoted to something other than the rightmost leaf node and the score should reset
ft->seqinsert_score = 0;
toku_drd_unsafe_set(&ft->seqinsert_score, static_cast<uint32_t>(0));
}
// if we call toku_ft_flush_some_child, then that function unpins the root
......@@ -1787,19 +1787,19 @@ static inline bool should_inject_in_node(seqinsert_loc loc, int height, int dept
return (height == 0 || (loc == NEITHER_EXTREME && (height <= 1 || depth >= 2)));
}
static void ft_set_or_verify_rightmost_blocknum(FT ft, BLOCKNUM b)
static void ft_verify_or_set_rightmost_blocknum(FT ft, BLOCKNUM b)
// Given: 'b', the _definitive_ and constant rightmost blocknum of 'ft'
{
if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) {
if (toku_drd_unsafe_fetch(&ft->rightmost_blocknum.b) == RESERVED_BLOCKNUM_NULL) {
toku_ft_lock(ft);
if (ft->rightmost_blocknum.b == RESERVED_BLOCKNUM_NULL) {
ft->rightmost_blocknum = b;
toku_drd_unsafe_set(&ft->rightmost_blocknum, b);
}
toku_ft_unlock(ft);
}
// The rightmost blocknum only transitions from RESERVED_BLOCKNUM_NULL to non-null.
// If it's already set, verify that the stored value is consistent with 'b'
invariant(ft->rightmost_blocknum.b == b.b);
invariant(toku_drd_unsafe_fetch(&ft->rightmost_blocknum.b) == b.b);
}
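The function above is double-checked initialization: an unlocked peek, then the lock, then a re-check before the one-time write. A condensed sketch of the same pattern (the variable, sentinel, and mutex are illustrative stand-ins for rightmost_blocknum, RESERVED_BLOCKNUM_NULL, and the ft lock; the drd helpers and invariant() come from this codebase):

```cpp
// Double-checked, set-once initialization annotated for drd.
static int64_t g_value = -1;   // -1 plays the role of RESERVED_BLOCKNUM_NULL
static toku_mutex_t g_lock;    // assumed initialized elsewhere

static void set_once(int64_t v) {
    if (toku_drd_unsafe_fetch(&g_value) == -1) {   // unlocked fast path
        toku_mutex_lock(&g_lock);
        if (g_value == -1) {                       // re-check under the lock
            toku_drd_unsafe_set(&g_value, v);      // the only transition
        }
        toku_mutex_unlock(&g_lock);
    }
    // The value only transitions once, so it must now equal v.
    invariant(toku_drd_unsafe_fetch(&g_value) == v);
}
```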
bool toku_bnc_should_promote(FT ft, NONLEAF_CHILDINFO bnc) {
......@@ -1861,7 +1861,7 @@ static void push_something_in_subtree(
// otherwise. We explicitly skip the root node because then we don't have
// to worry about changing the rightmost blocknum when the root splits.
if (subtree_root->height == 0 && loc == RIGHT_EXTREME && subtree_root->blocknum.b != ft->h->root_blocknum.b) {
ft_set_or_verify_rightmost_blocknum(ft, subtree_root->blocknum);
ft_verify_or_set_rightmost_blocknum(ft, subtree_root->blocknum);
}
inject_message_in_locked_node(ft, subtree_root, target_childnum, msg, flow_deltas, gc_info);
} else {
......@@ -2239,7 +2239,7 @@ static int ft_leaf_get_relative_key_pos(FT ft, FTNODE leaf, const DBT *key, bool
nullptr, nullptr, nullptr
);
*target_childnum = childnum;
if (r == 0 && !le_latest_is_del(leftmost_le)) {
if (r == 0 && !le_latest_is_del(target_le)) {
*nondeleted_key_found = true;
}
}
......@@ -2278,20 +2278,21 @@ static int ft_maybe_insert_into_rightmost_leaf(FT ft, DBT *key, DBT *val, XIDS m
int r = -1;
uint32_t rightmost_fullhash;
BLOCKNUM rightmost_blocknum = ft->rightmost_blocknum;
BLOCKNUM rightmost_blocknum;
FTNODE rightmost_leaf = nullptr;
// Don't do the optimization if our heuristic suggests that
// the insertion pattern is not sequential.
if (ft->seqinsert_score < FT_SEQINSERT_SCORE_THRESHOLD) {
if (toku_drd_unsafe_fetch(&ft->seqinsert_score) < FT_SEQINSERT_SCORE_THRESHOLD) {
goto cleanup;
}
// We know the seqinsert score is high enough that we should
// attemp to directly insert into the right most leaf. Because
// attempt to directly insert into the rightmost leaf. Because
// the score is non-zero, the rightmost blocknum must have been
// set. See inject_message_in_locked_node(), which only increases
// the score if the target node blocknum == rightmost_blocknum
rightmost_blocknum = ft->rightmost_blocknum;
invariant(rightmost_blocknum.b != RESERVED_BLOCKNUM_NULL);
// Pin the rightmost leaf with a write lock.
......
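Condensed, the sequential-insert fast path gated by these two fields works as follows (a sketch, not the actual control flow; `try_insert_into_rightmost()` is a hypothetical stand-in for the real pinned-leaf insert):

```cpp
// Sketch of the seqinsert fast path: only attempt the direct rightmost-leaf
// insert when the racy score says the workload looks sequential, in which
// case the rightmost blocknum is guaranteed to have been set.
static bool maybe_fast_insert(FT ft, DBT *key, DBT *val) {
    if (toku_drd_unsafe_fetch(&ft->seqinsert_score) < FT_SEQINSERT_SCORE_THRESHOLD) {
        return false;  // heuristic says the pattern is not sequential
    }
    BLOCKNUM b = toku_drd_unsafe_fetch(&ft->rightmost_blocknum);
    invariant(b.b != RESERVED_BLOCKNUM_NULL);  // nonzero score implies it is set
    return try_insert_into_rightmost(ft, b, key, val);  // hypothetical helper
}
```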
......@@ -574,14 +574,18 @@ toku_logger_make_space_in_inbuf (TOKULOGGER logger, int n_bytes_needed)
release_output(logger, fsynced_lsn);
}
void toku_logger_fsync (TOKULOGGER logger)
void toku_logger_fsync(TOKULOGGER logger)
// Effect: This is the exported fsync used by ydb.c for env_log_flush. Group commit doesn't have to work.
// Entry: Holds no locks
// Exit: Holds no locks
// Implementation note: Acquire the output condition lock, then the output permission, then release the output condition lock, then get the input lock.
// Then release everything.
// Then release everything. Hold the input lock while reading the current max lsn in buf to make drd happy that there is no data race.
{
toku_logger_maybe_fsync(logger, logger->inbuf.max_lsn_in_buf, true, false);
ml_lock(&logger->input_lock);
const LSN max_lsn_in_buf = logger->inbuf.max_lsn_in_buf;
ml_unlock(&logger->input_lock);
toku_logger_maybe_fsync(logger, max_lsn_in_buf, true, false);
}
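The fix applies the usual snapshot-under-the-lock pattern: copy the shared field while holding its lock, release, then operate on the copy. A minimal sketch (the struct is a stand-in for the logger internals; a plain toku_mutex replaces the logger's ml_lock wrapper):

```cpp
// Sketch: take a race-free snapshot of a shared field under its lock,
// then use the copy without holding the lock.
struct inbuf_like {
    toku_mutex_t lock;    // assumed initialized elsewhere
    LSN max_lsn_in_buf;   // written by concurrent log appenders
};

static LSN snapshot_max_lsn(inbuf_like *in) {
    toku_mutex_lock(&in->lock);
    const LSN lsn = in->max_lsn_in_buf;  // consistent copy
    toku_mutex_unlock(&in->lock);
    return lsn;                          // safe to pass to maybe_fsync
}
```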
void toku_logger_fsync_if_lsn_not_fsynced (TOKULOGGER logger, LSN lsn) {
......
......@@ -147,7 +147,7 @@ struct toku_thread_pool *get_ft_pool(void) {
}
void toku_serialize_set_parallel(bool in_parallel) {
toku_serialize_in_parallel = in_parallel;
toku_drd_unsafe_set(&toku_serialize_in_parallel, in_parallel);
}
void toku_ft_serialize_layer_init(void) {
......@@ -854,7 +854,7 @@ toku_serialize_ftnode_to (int fd, BLOCKNUM blocknum, FTNODE node, FTNODE_DISK_DA
ft->h->basementnodesize,
ft->h->compression_method,
do_rebalancing,
toku_serialize_in_parallel, // in_parallel
toku_drd_unsafe_fetch(&toku_serialize_in_parallel),
&n_to_write,
&n_uncompressed_bytes,
&compressed_buf
......
......@@ -95,6 +95,11 @@ if(BUILD_TESTING OR BUILD_FT_TESTS)
REQUIRED_FILES "ftdump-test-generate.ctest-data"
)
## keyrange has some inequality assumptions that were broken by
## promotion; they seem benign but are complicated, so for now we skip
## this test. When we get some time, we should fix it and re-enable it.
list(REMOVE_ITEM tests keyrange)
foreach(test ${tests})
add_ft_test(${test})
endforeach(test)
......
......@@ -327,7 +327,7 @@ toku_txn_manager_get_oldest_living_xid(TXN_MANAGER txn_manager) {
}
TXNID toku_txn_manager_get_oldest_referenced_xid_estimate(TXN_MANAGER txn_manager) {
return txn_manager->last_calculated_oldest_referenced_xid;
return toku_drd_unsafe_fetch(&txn_manager->last_calculated_oldest_referenced_xid);
}
int live_root_txn_list_iter(const TOKUTXN &live_xid, const uint32_t UU(index), TXNID **const referenced_xids);
......@@ -387,7 +387,7 @@ static void set_oldest_referenced_xid(TXN_MANAGER txn_manager) {
oldest_referenced_xid = txn_manager->last_xid;
}
invariant(oldest_referenced_xid != TXNID_MAX);
txn_manager->last_calculated_oldest_referenced_xid = oldest_referenced_xid;
toku_drd_unsafe_set(&txn_manager->last_calculated_oldest_referenced_xid, oldest_referenced_xid);
}
//Heaviside function to find a TOKUTXN by TOKUTXN (used to find the index)
......
......@@ -286,11 +286,9 @@
Memcheck:Leak
match-leak-kinds: reachable
fun:calloc
obj:/usr/lib/libdl-2.19.so
...
fun:dlsym
fun:_Z19toku_memory_startupv
fun:call_init.part.0
...
fun:_dl_init
obj:/usr/lib/ld-2.19.so
}
......@@ -275,7 +275,7 @@ void locktree::sto_end(void) {
void locktree::sto_end_early_no_accounting(void *prepared_lkr) {
sto_migrate_buffer_ranges_to_tree(prepared_lkr);
sto_end();
m_sto_score = 0;
toku_drd_unsafe_set(&m_sto_score, 0);
}
void locktree::sto_end_early(void *prepared_lkr) {
......@@ -330,7 +330,7 @@ void locktree::sto_migrate_buffer_ranges_to_tree(void *prepared_lkr) {
bool locktree::sto_try_acquire(void *prepared_lkr,
TXNID txnid,
const DBT *left_key, const DBT *right_key) {
if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && m_sto_score >= STO_SCORE_THRESHOLD) {
if (m_rangetree->is_empty() && m_sto_buffer.is_empty() && data_race::unsafe_read<int>(m_sto_score) >= STO_SCORE_THRESHOLD) {
// We can do the optimization because the rangetree is empty, and
// we know its worth trying because the sto score is big enough.
sto_begin(txnid);
......@@ -536,16 +536,16 @@ void locktree::remove_overlapping_locks_for_txnid(TXNID txnid,
}
bool locktree::sto_txnid_is_valid_unsafe(void) const {
return m_sto_txnid != TXNID_NONE;
return data_race::unsafe_read<TXNID>(m_sto_txnid) != TXNID_NONE;
}
int locktree::sto_get_score_unsafe(void) const {
return m_sto_score;
return data_race::unsafe_read<int>(m_sto_score);
}
bool locktree::sto_try_release(TXNID txnid) {
bool released = false;
if (sto_txnid_is_valid_unsafe()) {
if (data_race::unsafe_read<TXNID>(m_sto_txnid) != TXNID_NONE) {
// check the bit again with a prepared locked keyrange,
// which protects the optimization bits and rangetree data
concurrent_tree::locked_keyrange lkr;
......@@ -585,7 +585,7 @@ void locktree::release_locks(TXNID txnid, const range_buffer *ranges) {
// the threshold and we'll try the optimization again. This
// is how a previously multithreaded system transitions into
// a single threaded system that benefits from the optimization.
if (sto_get_score_unsafe() < STO_SCORE_THRESHOLD) {
if (data_race::unsafe_read<int>(m_sto_score) < STO_SCORE_THRESHOLD) {
toku_sync_fetch_and_add(&m_sto_score, 1);
}
}
......
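In other words, the STO score is a saturating counter: read racily on the hot paths, bumped atomically on release, and consulted before attempting the optimization. A condensed sketch of the two sides (the surrounding structure is illustrative; the helpers are from this codebase):

```cpp
// Sketch of the single-transaction-optimization (STO) score protocol.
static int m_sto_score;  // illustrative stand-in for locktree::m_sto_score

static void on_release_locks(void) {
    // Racy read is fine: worst case we skip or repeat one atomic bump.
    if (data_race::unsafe_read<int>(m_sto_score) < STO_SCORE_THRESHOLD) {
        toku_sync_fetch_and_add(&m_sto_score, 1);
    }
}

static bool should_try_sto(bool rangetree_empty, bool sto_buffer_empty) {
    return rangetree_empty && sto_buffer_empty &&
           data_race::unsafe_read<int>(m_sto_score) >= STO_SCORE_THRESHOLD;
}
```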
......@@ -267,19 +267,22 @@ void locktree_manager::release_lt(locktree *lt) {
uint32_t refs = lt->release_reference();
if (refs == 0) {
mutex_lock();
// lt may or may not have been destroyed by now, so look it up.
locktree *find_lt = locktree_map_find(dict_id);
if (find_lt != nullptr) {
// A locktree is still in the map with that dict_id, so it must be
// equal to lt. This is true because dictionary ids are never reused.
// If the reference count is zero, it's our responsibility to remove
// it and do the destroy. Otherwise, someone still wants it.
invariant(find_lt == lt);
if (lt->get_reference_count() == 0) {
locktree_map_remove(lt);
do_destroy = true;
// If the locktree is still valid then check if it should be deleted.
if (find_lt == lt) {
if (lt->get_reference_count() == 0) {
locktree_map_remove(lt);
do_destroy = true;
}
m_lt_counters.add(lt->get_lock_request_info()->counters);
}
}
m_lt_counters.add(lt->get_lock_request_info()->counters);
mutex_unlock();
}
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*
COPYING CONDITIONS NOTICE:
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation, and provided that the
following conditions are met:
* Redistributions of source code must retain this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below).
* Redistributions in binary form must reproduce this COPYING
CONDITIONS NOTICE, the COPYRIGHT NOTICE (below), the
DISCLAIMER (below), the UNIVERSITY PATENT NOTICE (below), the
PATENT MARKING NOTICE (below), and the PATENT RIGHTS
GRANT (below) in the documentation and/or other materials
provided with the distribution.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
COPYRIGHT NOTICE:
TokuFT, Tokutek Fractal Tree Indexing Library.
Copyright (C) 2014 Tokutek, Inc.
DISCLAIMER:
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
UNIVERSITY PATENT NOTICE:
The technology is licensed by the Massachusetts Institute of
Technology, Rutgers State University of New Jersey, and the Research
Foundation of State University of New York at Stony Brook under
United States of America Serial No. 11/760379 and to the patents
and/or patent applications resulting from it.
PATENT MARKING NOTICE:
This software is covered by US Patent No. 8,185,551.
This software is covered by US Patent No. 8,489,638.
PATENT RIGHTS GRANT:
"THIS IMPLEMENTATION" means the copyrightable works distributed by
Tokutek as part of the Fractal Tree project.
"PATENT CLAIMS" means the claims of patents that are owned or
licensable by Tokutek, both currently or in the future; and that in
the absence of this license would be infringed by THIS
IMPLEMENTATION or by using or running THIS IMPLEMENTATION.
"PATENT CHALLENGE" shall mean a challenge to the validity,
patentability, enforceability and/or non-infringement of any of the
PATENT CLAIMS or otherwise opposing any of the PATENT CLAIMS.
Tokutek hereby grants to you, for the term and geographical scope of
the PATENT CLAIMS, a non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to
make, have made, use, offer to sell, sell, import, transfer, and
otherwise run, modify, and propagate the contents of THIS
IMPLEMENTATION, where such license applies only to the PATENT
CLAIMS. This grant does not include claims that would be infringed
only as a consequence of further modifications of THIS
IMPLEMENTATION. If you or your agent or licensee institute or order
or agree to the institution of patent litigation against any entity
(including a cross-claim or counterclaim in a lawsuit) alleging that
THIS IMPLEMENTATION constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any rights
granted to you under this License shall terminate as of the date
such litigation is filed. If you or your agent or exclusive
licensee institute or order or agree to the institution of a PATENT
CHALLENGE, then Tokutek may terminate any rights granted to you
under this License.
*/
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
// This test crashes prior to the FT-600 fix.
#include "manager_unit_test.h"
namespace toku {
static int my_cmp(DB *UU(db), const DBT *UU(a), const DBT *UU(b)) {
return 0;
}
static void my_test(locktree_manager *mgr) {
toku::comparator my_comparator;
my_comparator.create(my_cmp, nullptr);
DICTIONARY_ID a = { 42 };
for (int i=0; i<100000; i++) {
locktree *alt = mgr->get_lt(a, my_comparator, nullptr);
invariant_notnull(alt);
mgr->release_lt(alt);
}
my_comparator.destroy();
}
static void *my_tester(void *arg) {
locktree_manager *mgr = (locktree_manager *) arg;
my_test(mgr);
return arg;
}
void manager_unit_test::test_reference_release_lt(void) {
int r;
locktree_manager mgr;
mgr.create(nullptr, nullptr, nullptr, nullptr);
const int nthreads = 2;
pthread_t ids[nthreads];
for (int i = 0; i < nthreads; i++) {
r = toku_pthread_create(&ids[i], nullptr, my_tester, &mgr);
assert(r == 0);
}
for (int i = 0; i < nthreads; i++) {
void *ret;
r = toku_pthread_join(ids[i], &ret);
assert(r == 0);
}
my_test(&mgr);
mgr.destroy();
}
} /* namespace toku */
int main(void) {
toku::manager_unit_test test;
test.test_reference_release_lt();
return 0;
}
......@@ -88,9 +88,11 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include <stdio.h>
#include <stdlib.h>
int main(void) {
(void) malloc(42);
// GCC has gotten smart enough to optimize this away unless we use it.
printf("%p\n", malloc(42));
return 0;
}
......@@ -95,11 +95,11 @@ static void foo(int i) {
printf("%d\n", i);
}
int main(int argc, char *argv[]) {
int main(void) {
int arg;
int i;
for (i = 1; i < argc; i++) {
arg = atoi(argv[i]);
char *buf = (char *) &arg;
for (int i = 0; i < 2; i++) {
buf[i] = 'a';
}
foo(arg);
return 0;
......
......@@ -103,6 +103,10 @@ PATENT RIGHTS GRANT:
# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) VALGRIND_HG_DISABLE_CHECKING(p, size)
# define TOKU_DRD_IGNORE_VAR(v) DRD_IGNORE_VAR(v)
# define TOKU_DRD_STOP_IGNORING_VAR(v) DRD_STOP_IGNORING_VAR(v)
# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ANNOTATE_IGNORE_READS_BEGIN()
# define TOKU_ANNOTATE_IGNORE_READS_END() ANNOTATE_IGNORE_READS_END()
# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ANNOTATE_IGNORE_WRITES_BEGIN()
# define TOKU_ANNOTATE_IGNORE_WRITES_END() ANNOTATE_IGNORE_WRITES_END()
/*
* How to make helgrind happy about tree rotations and new mutex orderings:
......@@ -134,7 +138,51 @@ PATENT RIGHTS GRANT:
# define TOKU_VALGRIND_HG_DISABLE_CHECKING(p, size) ((void) 0)
# define TOKU_DRD_IGNORE_VAR(v)
# define TOKU_DRD_STOP_IGNORING_VAR(v)
# define TOKU_ANNOTATE_IGNORE_READS_BEGIN() ((void) 0)
# define TOKU_ANNOTATE_IGNORE_READS_END() ((void) 0)
# define TOKU_ANNOTATE_IGNORE_WRITES_BEGIN() ((void) 0)
# define TOKU_ANNOTATE_IGNORE_WRITES_END() ((void) 0)
# define TOKU_VALGRIND_RESET_MUTEX_ORDERING_INFO(mutex)
# define RUNNING_ON_VALGRIND (0U)
#endif
namespace data_race {
template<typename T>
class unsafe_read {
const T &_val;
public:
unsafe_read(const T &val)
: _val(val) {
TOKU_VALGRIND_HG_DISABLE_CHECKING(&_val, sizeof _val);
TOKU_ANNOTATE_IGNORE_READS_BEGIN();
}
~unsafe_read() {
TOKU_ANNOTATE_IGNORE_READS_END();
TOKU_VALGRIND_HG_ENABLE_CHECKING(&_val, sizeof _val);
}
operator T() const {
return _val;
}
};
} // namespace data_race
// Unsafely fetch and return a `T' from src, telling drd to ignore
// racy access to src for the next sizeof(*src) bytes
template <typename T>
T toku_drd_unsafe_fetch(T *src) {
return data_race::unsafe_read<T>(*src);
}
// Unsafely set a `T' value into *dest from src, telling drd to ignore
// racy access to dest for the next sizeof(*dest) bytes
template <typename T>
void toku_drd_unsafe_set(T *dest, const T src) {
TOKU_VALGRIND_HG_DISABLE_CHECKING(dest, sizeof *dest);
TOKU_ANNOTATE_IGNORE_WRITES_BEGIN();
*dest = src;
TOKU_ANNOTATE_IGNORE_WRITES_END();
TOKU_VALGRIND_HG_ENABLE_CHECKING(dest, sizeof *dest);
}
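data_race::unsafe_read is an RAII guard: the constructor disables checking and opens the ignore-reads window, the conversion operator performs the racy load, and the destructor closes the window and re-enables checking. A usage sketch (the counter is illustrative):

```cpp
// Sketch: the temporary unsafe_read<T> lives for the whole expression, so
// checking is off exactly for the duration of the intentionally racy load.
static uint32_t shared_score;  // concurrently written elsewhere, on purpose

static bool score_is_high(void) {
    return data_race::unsafe_read<uint32_t>(shared_score) >= 100;
}

// toku_drd_unsafe_fetch(&shared_score) is the equivalent one-liner wrapper.
```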
......@@ -766,8 +766,8 @@ if __name__ == '__main__':
help="skip the tests that don't involve upgrade [default=False]")
upgrade_group.add_option('--double_upgrade', action='store_true', dest='double_upgrade', default=False,
help='run the upgrade tests twice in a row [default=False]')
upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3', '7.1.6'],
help='which old versions to use for running the stress tests in upgrade mode. can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3, 7.1.6]')
upgrade_group.add_option('--add_old_version', action='append', type='choice', dest='old_versions', choices=['4.2.0', '5.0.8', '5.2.7', '6.0.0', '6.1.0', '6.5.1', '6.6.3', '7.0.1', '7.1.6', 'v26', '7.5.0'],
help='which old versions to use for running the stress tests in upgrade mode. can be specified multiple times [options=4.2.0, 5.0.8, 5.2.7, 6.0.0, 6.1.0, 6.5.1, 6.6.3, 7.0.1, 7.1.6, v26, 7.5.0]')
upgrade_group.add_option('--old_environments_dir', type='string', dest='old_environments_dir',
default=('%s/old-stress-test-envs' % default_tokudb_data),
help='directory containing old version environments (should contain 5.0.8/, 5.2.7/, etc, and the environments should be in those) [default=../../tokudb.data/stress_environments]')
......
......@@ -486,6 +486,7 @@ if(BUILD_TESTING OR BUILD_SRC_TESTS)
ydb/loader-stress-test4z.tdb
ydb/recover_stress.tdb
ydb/test3529.tdb
ydb/test_insert_unique.tdb
)
set_tests_properties(${phenomenally_long_tests} PROPERTIES TIMEOUT 14400)
endif(BUILD_TESTING OR BUILD_SRC_TESTS)
......@@ -123,4 +123,24 @@
fun:pthread_cond_destroy@*
...
fun:_ZN7evictor7destroyEv
}
\ No newline at end of file
}
{
<helgrind_doesnt_understand_the_way_the_world_works_and_ignores_our_disable_checking_instructions>
Helgrind:Race
fun:_ZN4toku8locktree15sto_try_acquireEPvmPK10__toku_dbtS4_
fun:_ZN4toku8locktree12acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setE
fun:_ZN4toku8locktree16try_acquire_lockEbmPK10__toku_dbtS3_PNS_9txnid_setEb
fun:_ZN4toku8locktree18acquire_write_lockEmPK10__toku_dbtS3_PNS_9txnid_setEb
fun:_ZN4toku12lock_request5startEv
...
}
{
<helgrind_bug_323432_see_http://permalink.gmane.org/gmane.comp.debugging.valgrind/13325>
Helgrind:Race
obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
fun:pthread_mutex_destroy
fun:toku_mutex_destroy
fun:_ZN4toku8treenode4freeEPS0_
fun:_ZN4toku8treenode22remove_root_of_subtreeEv
...
}
......@@ -133,7 +133,7 @@ static void test_large_sequential_insert_unique(DB_ENV *env) {
r = db->set_readpagesize(db, 2 * 1024); CKERR(r);
r = db->open(db, NULL, "db", NULL, DB_BTREE, DB_CREATE, 0644); CKERR(r);
const int val_size = 1024;
const int val_size = 8;
char *XMALLOC_N(val_size, val_buf);
memset(val_buf, 'k', val_size);
DBT val;
......@@ -153,9 +153,18 @@ static void test_large_sequential_insert_unique(DB_ENV *env) {
// .. but re-inserting is okay, if we provisionally deleted the row
DB_TXN *txn;
r = env->txn_begin(env, NULL, &txn, 0); CKERR(r);
r = db->del(db, NULL, &key, DB_DELETE_ANY); CKERR(r);
r = db->put(db, NULL, &key, &val, DB_NOOVERWRITE); CKERR(r);
r = db->del(db, txn, &key, DB_DELETE_ANY); CKERR(r);
r = db->put(db, txn, &key, &val, DB_NOOVERWRITE); CKERR(r);
r = txn->commit(txn, 0); CKERR(r);
// re-inserting is also ok if we actually delete the row, for some key < k
if (i > 0) {
DBT other_key;
int other_k = toku_htonl(i - 10);
dbt_init(&other_key, &other_k, sizeof(other_k));
r = db->del(db, NULL, &other_key, DB_DELETE_ANY); CKERR(r);
r = db->put(db, NULL, &other_key, &val, DB_NOOVERWRITE); CKERR(r);
}
}
if (i > 0 && i % 250 == 0) {
// sanity check - unique checks on random keys we already inserted should
......
......@@ -138,15 +138,12 @@ static int update_callback(DB *UU(db), const DBT *UU(key), const DBT *old_val, c
void (*set_val)(const DBT *new_val, void *setval_extra), void *setval_extra) {
assert(extra != nullptr);
assert(old_val != nullptr);
assert(extra->size == 0);
assert(old_val->size == 0);
assert(extra->size == 0 || extra->size == 100);
assert(old_val->size == 0 || old_val->size == 100);
if (extra->data == nullptr) {
set_val(nullptr, setval_extra);
} else {
DBT new_val;
char empty_v;
dbt_init(&new_val, &empty_v, 0);
set_val(&new_val, setval_extra);
set_val(extra, setval_extra);
}
return 0;
}
......@@ -176,12 +173,13 @@ static void test_keylen_diff(enum overwrite_method method, bool control_test) {
r = db->set_readpagesize(db, 1 * 1024); // smaller basements so we get more per leaf
r = db->open(db, nullptr, "db", nullptr, DB_BTREE, DB_CREATE, 0666); CKERR(r);
DBT null_dbt, empty_dbt;
char empty_v;
dbt_init(&empty_dbt, &empty_v, 0);
DBT null_dbt, val_dbt;
char val_buf[100];
memset(val_buf, 0, sizeof val_buf);
dbt_init(&val_dbt, &val_buf, sizeof val_buf);
dbt_init(&null_dbt, nullptr, 0);
const int num_keys = 256 * 1000;
const int num_keys = 1<<11; //256 * 1000;
for (int i = 0; i < num_keys; i++) {
// insert it using a 4 byte key ..
......@@ -189,7 +187,7 @@ static void test_keylen_diff(enum overwrite_method method, bool control_test) {
DBT dbt;
dbt_init(&dbt, &key, key.size());
r = db->put(db, nullptr, &dbt, &empty_dbt, 0); CKERR(r);
r = db->put(db, nullptr, &dbt, &val_dbt, 0); CKERR(r);
}
// overwrite keys randomly, so we induce flushes and get better / realistic coverage
......@@ -217,7 +215,7 @@ static void test_keylen_diff(enum overwrite_method method, bool control_test) {
env->txn_begin(env, nullptr, &txn, DB_TXN_NOSYNC); CKERR(r);
switch (method) {
case VIA_INSERT: {
r = db->put(db, txn, &dbt, &empty_dbt, 0); CKERR(r);
r = db->put(db, txn, &dbt, &val_dbt, 0); CKERR(r);
break;
}
case VIA_DELETE: {
......@@ -228,12 +226,12 @@ static void test_keylen_diff(enum overwrite_method method, bool control_test) {
}
case VIA_UPDATE_OVERWRITE:
case VIA_UPDATE_DELETE: {
r = db->update(db, txn, &dbt, method == VIA_UPDATE_DELETE ? &null_dbt : &empty_dbt, 0); CKERR(r);
r = db->update(db, txn, &dbt, method == VIA_UPDATE_DELETE ? &null_dbt : &val_dbt, 0); CKERR(r);
break;
}
case VIA_UPDATE_OVERWRITE_BROADCAST:
case VIA_UPDATE_DELETE_BROADCAST: {
r = db->update_broadcast(db, txn, method == VIA_UPDATE_DELETE_BROADCAST ? &null_dbt : &empty_dbt, 0); CKERR(r);
r = db->update_broadcast(db, txn, method == VIA_UPDATE_DELETE_BROADCAST ? &null_dbt : &val_dbt, 0); CKERR(r);
if (i > 1 ) { // only need to test broadcast twice - one with abort, one without
txn->abort(txn); // we opened a txn so we should abort it before exiting
goto done;
......
......@@ -2995,12 +2995,7 @@ void ha_tokudb::init_hidden_prim_key_info(DB_TXN *txn) {
if (!(share->status & STATUS_PRIMARY_KEY_INIT)) {
int error = 0;
DBC* c = NULL;
error = share->key_file[primary_key]->cursor(
share->key_file[primary_key],
txn,
&c,
0
);
error = share->key_file[primary_key]->cursor(share->key_file[primary_key], txn, &c, 0);
assert(error == 0);
DBT key,val;
memset(&key, 0, sizeof(key));
......@@ -3220,11 +3215,12 @@ bool ha_tokudb::may_table_be_empty(DB_TXN *txn) {
error = share->file->cursor(share->file, txn, &tmp_cursor, 0);
if (error)
goto cleanup;
tmp_cursor->c_set_check_interrupt_callback(tmp_cursor, tokudb_killed_thd_callback, ha_thd());
if (empty_scan == TOKUDB_EMPTY_SCAN_LR)
error = tmp_cursor->c_getf_next(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
else
error = tmp_cursor->c_getf_prev(tmp_cursor, 0, smart_dbt_do_nothing, NULL);
error = map_to_handler_error(error);
if (error == DB_NOTFOUND)
ret_val = true;
else
......@@ -3544,6 +3540,7 @@ int ha_tokudb::is_val_unique(bool* is_unique, uchar* record, KEY* key_info, uint
goto cleanup;
}
else if (error) {
error = map_to_handler_error(error);
goto cleanup;
}
if (ir_info.cmp) {
......@@ -4504,6 +4501,11 @@ int ha_tokudb::index_init(uint keynr, bool sorted) {
if (keynr < table->s->keys && table->key_info[keynr].option_struct->clustering)
key_read = false;
#if TOKU_CLUSTERING_IS_COVERING
if (keynr < table->s->keys && table->key_info[keynr].option_struct->clustering)
key_read = false;
#endif
last_cursor_error = 0;
range_lock_grabbed = false;
range_lock_grabbed_null = false;
......@@ -4530,6 +4532,7 @@ int ha_tokudb::index_init(uint keynr, bool sorted) {
cursor = NULL; // Safety
goto exit;
}
cursor->c_set_check_interrupt_callback(cursor, tokudb_killed_thd_callback, thd);
memset((void *) &last_key, 0, sizeof(last_key));
add_to_trx_handler_list();
......@@ -5852,16 +5855,14 @@ void ha_tokudb::position(const uchar * record) {
// 0, always success
//
int ha_tokudb::info(uint flag) {
TOKUDB_HANDLER_DBUG_ENTER("%d %lld", flag, (long long) share->rows);
int error;
DB_TXN* txn = NULL;
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
DB_BTREE_STAT64 dict_stats;
TOKUDB_HANDLER_DBUG_ENTER("%d", flag);
int error = 0;
#if TOKU_CLUSTERING_IS_COVERING
for (uint i=0; i < table->s->keys; i++)
if (table->key_info[i].option_struct->clustering)
table->covering_keys.set_bit(i);
if (key_is_clustering(&table->key_info[i]))
table->covering_keys.set_bit(i);
#endif
DB_TXN* txn = NULL;
if (flag & HA_STATUS_VARIABLE) {
// Just to get optimizations right
stats.records = share->rows + share->rows_from_locked_table;
......@@ -5891,18 +5892,12 @@ int ha_tokudb::info(uint flag) {
else {
goto cleanup;
}
error = share->file->get_fragmentation(
share->file,
&frag_info
);
error = share->file->get_fragmentation(share->file, &frag_info);
if (error) { goto cleanup; }
stats.delete_length = frag_info.unused_bytes;
error = share->file->stat64(
share->file,
txn,
&dict_stats
);
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
if (error) { goto cleanup; }
stats.create_time = dict_stats.bt_create_time_sec;
......@@ -5938,6 +5933,7 @@ int ha_tokudb::info(uint flag) {
//
// this solution is much simpler than trying to maintain an
// accurate number of valid keys at the handlerton layer.
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
for (uint i = 0; i < curr_num_DBs; i++) {
// skip the primary key, skip dropped indexes
if (i == primary_key || share->key_file[i] == NULL) {
......@@ -6026,7 +6022,6 @@ int ha_tokudb::reset(void) {
TOKUDB_HANDLER_DBUG_RETURN(0);
}
//
// helper function that iterates through all DB's
// and grabs a lock (either read or write, but not both)
......@@ -6038,6 +6033,7 @@ int ha_tokudb::reset(void) {
// error otherwise
//
int ha_tokudb::acquire_table_lock (DB_TXN* trans, TABLE_LOCK_TYPE lt) {
TOKUDB_HANDLER_DBUG_ENTER("%p %s", trans, lt == lock_read ? "r" : "w");
int error = ENOSYS;
if (!num_DBs_locked_in_bulk) {
rw_rdlock(&share->num_DBs_lock);
......@@ -6069,10 +6065,9 @@ cleanup:
if (!num_DBs_locked_in_bulk) {
rw_unlock(&share->num_DBs_lock);
}
return error;
TOKUDB_HANDLER_DBUG_RETURN(error);
}
int ha_tokudb::create_txn(THD* thd, tokudb_trx_data* trx) {
int error;
ulong tx_isolation = thd_tx_isolation(thd);
......@@ -6249,7 +6244,6 @@ cleanup:
TABLE LOCK is done.
Under LOCK TABLES, each used table will force a call to start_stmt.
*/
int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
TOKUDB_HANDLER_DBUG_ENTER("cmd %d lock %d %s", thd_sql_command(thd), lock_type, share->table_name);
if (0)
......@@ -6278,27 +6272,6 @@ int ha_tokudb::start_stmt(THD * thd, thr_lock_type lock_type) {
TOKUDB_HANDLER_TRACE("trx->stmt %p already existed", trx->stmt);
}
}
//
// we know we are in lock tables
// attempt to grab a table lock
// if fail, continue, do not return error
// This is because a failure is ok, it simply means
// another active transaction has some locks.
// That other transaction may modify this table
// until it is unlocked, therefore having acquire_table_lock
// potentially grab some locks but not all is ok.
//
if (lock.type <= TL_READ_NO_INSERT) {
acquire_table_lock(trx->sub_sp_level,lock_read);
}
else {
if (!(thd_sql_command(thd) == SQLCOM_CREATE_INDEX ||
thd_sql_command(thd) == SQLCOM_ALTER_TABLE ||
thd_sql_command(thd) == SQLCOM_DROP_INDEX ||
thd_sql_command(thd) == SQLCOM_TRUNCATE)) {
acquire_table_lock(trx->sub_sp_level,lock_write);
}
}
if (added_rows > deleted_rows) {
share->rows_from_locked_table = added_rows - deleted_rows;
}
......@@ -6809,6 +6782,14 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
memset(&kc_info, 0, sizeof(kc_info));
#if 100000 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100999
// TokuDB does not support discover_table_names() and writes no files
// in the database directory, so automatic filename-based
// discover_table_names() doesn't work either. So, it must force the
// .frm file to disk.
form->s->write_frm_image();
#endif
#if TOKU_INCLUDE_OPTION_STRUCTS
const srv_row_format_t row_format = (srv_row_format_t) form->s->option_struct->row_format;
#else
......@@ -7054,17 +7035,17 @@ int ha_tokudb::delete_or_rename_table (const char* from_name, const char* to_nam
error = status_db->cursor(status_db, txn, &status_cursor, 0);
if (error) { goto cleanup; }
status_cursor->c_set_check_interrupt_callback(status_cursor, tokudb_killed_thd_callback, thd);
while (error != DB_NOTFOUND) {
error = status_cursor->c_get(
status_cursor,
&curr_key,
&curr_val,
DB_NEXT
);
if (error && error != DB_NOTFOUND) { goto cleanup; }
if (error == DB_NOTFOUND) { break; }
error = status_cursor->c_get(status_cursor, &curr_key, &curr_val, DB_NEXT);
if (error && error != DB_NOTFOUND) {
error = map_to_handler_error(error);
goto cleanup;
}
if (error == DB_NOTFOUND) {
break;
}
HA_METADATA_KEY mk = *(HA_METADATA_KEY *)curr_key.data;
if (mk != hatoku_key_name) {
continue;
......@@ -7939,23 +7920,33 @@ void ha_tokudb::restore_drop_indexes(TABLE *table_arg, uint *key_num, uint num_o
}
int ha_tokudb::map_to_handler_error(int error) {
if (error == DB_LOCK_DEADLOCK)
switch (error) {
case DB_LOCK_DEADLOCK:
error = HA_ERR_LOCK_DEADLOCK;
if (error == DB_LOCK_NOTGRANTED)
break;
case DB_LOCK_NOTGRANTED:
error = HA_ERR_LOCK_WAIT_TIMEOUT;
break;
#if defined(HA_ERR_DISK_FULL)
if (error == ENOSPC) {
case ENOSPC:
error = HA_ERR_DISK_FULL;
}
break;
#endif
if (error == DB_KEYEXIST) {
case DB_KEYEXIST:
error = HA_ERR_FOUND_DUPP_KEY;
}
break;
#if defined(HA_ALTER_ERROR)
if (error == HA_ALTER_ERROR) {
case HA_ALTER_ERROR:
error = HA_ERR_UNSUPPORTED;
}
break;
#endif
case TOKUDB_INTERRUPTED:
error = ER_QUERY_INTERRUPTED;
break;
case TOKUDB_OUT_OF_LOCKS:
error = HA_ERR_LOCK_TABLE_FULL;
break;
}
return error;
}
......
......@@ -756,7 +756,9 @@ public:
uchar* buf,
DBT* key_to_compare
);
#if TOKU_INCLUDE_ROW_TYPE_COMPRESSION
enum row_type get_row_type() const;
#endif
private:
int read_full_row(uchar * buf);
int __close();
......
......@@ -102,7 +102,7 @@ struct analyze_progress_extra {
static int analyze_progress(void *v_extra, uint64_t rows) {
struct analyze_progress_extra *extra = (struct analyze_progress_extra *) v_extra;
THD *thd = extra->thd;
if (thd->killed)
if (thd_killed(thd))
return ER_ABORTING_CONNECTION;
time_t t_now = time(0);
......@@ -190,7 +190,7 @@ typedef struct hot_optimize_context {
static int hot_poll_fun(void *extra, float progress) {
HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra;
if (context->thd->killed) {
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg, "The process has been killed, aborting hot optimize.");
return ER_ABORTING_CONNECTION;
}
......@@ -271,7 +271,7 @@ struct check_context {
static int ha_tokudb_check_progress(void *extra, float progress) {
struct check_context *context = (struct check_context *) extra;
int result = 0;
if (context->thd->killed)
if (thd_killed(context->thd))
result = ER_ABORTING_CONNECTION;
return result;
}
......
......@@ -113,6 +113,7 @@ PATENT RIGHTS GRANT:
#endif
#define TOKU_INCLUDE_OPTION_STRUCTS 1
#define TOKU_OPTIMIZE_WITH_RECREATE 1
#define TOKU_CLUSTERING_IS_COVERING 1
#elif 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
// mysql 5.7 with no patches
......
......@@ -427,7 +427,12 @@ static uint64_t tokudb_get_killed_time_callback(uint64_t default_killed_time) {
static int tokudb_killed_callback(void) {
THD *thd = current_thd;
return thd->killed;
return thd_killed(thd);
}
static bool tokudb_killed_thd_callback(void *extra) {
THD *thd = static_cast<THD *>(extra);
return thd_killed(thd) != 0;
}
enum {
......
......@@ -98,6 +98,36 @@ count(*)
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
set tokudb_bulk_fetch=OFF;
SELECT count(*) from t;
count(*)
......@@ -159,6 +189,36 @@ count(*)
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
SELECT count(*) from t;
count(*)
1048576
1
set tokudb_bulk_fetch=ON;
SELECT count(*) from t where num > 700000;
......@@ -221,6 +281,36 @@ count(*)
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
set tokudb_bulk_fetch=OFF;
SELECT count(*) from t where num > 700000;
count(*)
......@@ -282,5 +372,35 @@ count(*)
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
SELECT count(*) from t where num > 700000;
count(*)
348576
1
drop table t;
......@@ -6,64 +6,56 @@ source include/have_tokudb.inc;
source include/big_test.inc;
set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t,t1,t2;
drop table if exists t1,t2;
enable_warnings;
let $maxq = 10;
let $debug = 0;
CREATE TABLE `t` (
CREATE TABLE `t1` (
`num` int(10) unsigned auto_increment NOT NULL,
`val` varchar(32) DEFAULT NULL,
PRIMARY KEY (`num`)
);
# put 8M rows into t
INSERT INTO t values (null,null);
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
SELECT count(*) FROM t;
# Create first table from source table t
CREATE TABLE `t1` (
`num` int(10) unsigned NOT NULL,
`val` varchar(32) DEFAULT NULL,
PRIMARY KEY (`num`)
) as select * from t;
# put 1M rows into t1
INSERT INTO t1 values (null,null);
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
SELECT count(*) FROM t1;
let $s = `select to_seconds(now())`;
let $maxq = 10;
set tokudb_bulk_fetch=ON;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxq) {
SELECT count(*) from t1;
CREATE TABLE t2 AS SELECT count(*) from t1;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_select = `select to_seconds(now()) - $s`;
let $time_elapsed_on = `select unix_timestamp() - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_select seconds.;
let $s = `select to_seconds(now())`;
set tokudb_bulk_fetch=OFF;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxq) {
CREATE TABLE t2 AS SELECT count(*) from t1;
......@@ -71,48 +63,41 @@ while ($i < $maxq) {
inc $i;
}
let $time_elapsed_create_select = `select to_seconds(now()) - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_create_select seconds.;
let $time_elapsed_off = `select unix_timestamp() - $s`;
# This check evaluates whether the time elapsed during the create select statement is on par
# with the select statement, which will confirm that bulk fetch is in fact being used.
let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`;
# Check that the time with bulk fetch off is at least twice that with bulk fetch on
let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
echo $verdict;
if ($debug) { echo index $verdict $time_elapsed_on $time_elapsed_off; }
if (!$verdict) { echo index $time_elapsed_on $time_elapsed_off; }
let $maxrq = 30;
let $maxq = 30;
let $s = `select to_seconds(now())`;
set tokudb_bulk_fetch=ON;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxrq) {
SELECT count(*) from t1 where num > 7000000;
while ($i < $maxq) {
CREATE TABLE t2 AS SELECT count(*) from t1 where num > 700000;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_select = `select to_seconds(now()) - $s`;
let $time_elapsed_on = `select unix_timestamp() - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_select seconds.;
let $s = `select to_seconds(now())`;
set tokudb_bulk_fetch=OFF;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxrq) {
CREATE TABLE t2 AS SELECT count(*) from t1 where num > 7000000;
while ($i < $maxq) {
CREATE TABLE t2 AS SELECT count(*) from t1 where num > 700000;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_create_select = `select to_seconds(now()) - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_create_select seconds.;
let $time_elapsed_off = `select unix_timestamp() - $s`;
# This check evaluates whether the time elapsed during the create select statement is on par
# with the select statement, which will confirm that bulk fetch is in fact being used.
let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= $time_elapsed_select`;
# Check that the time with bulk fetch off is at least twice that with bulk fetch on
let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 2 * $time_elapsed_on`;
echo $verdict;
if ($debug) { echo range $verdict $time_elapsed_on $time_elapsed_off; }
if (!$verdict) { echo range $time_elapsed_on $time_elapsed_off; }
drop table t,t1;
drop table t1;
......@@ -7,137 +7,96 @@ source include/have_partition.inc;
source include/big_test.inc;
set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t,t1,t2,t3;
drop table if exists t1,t2;
enable_warnings;
let $maxq = 10;
CREATE TABLE `t` (
`num` int(10) unsigned auto_increment NOT NULL,
`val` varchar(32) DEFAULT NULL,
PRIMARY KEY (`num`)
);
# put 8M rows into t
INSERT INTO t values (null,null);
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
INSERT INTO t SELECT null,val FROM t;
SELECT count(*) FROM t;
# Create base table (control table) from source table t
CREATE TABLE `t1` (
`num` int(10) unsigned NOT NULL,
`val` varchar(32) DEFAULT NULL,
PRIMARY KEY (`num`)
) as select * from t;
let $debug = 0;
# Create source hash partitioned table from source table t
CREATE TABLE `t2` (
`num` int(10) unsigned NOT NULL,
CREATE TABLE `t1` (
`num` int(10) unsigned auto_increment NOT NULL,
`val` varchar(32) DEFAULT NULL,
PRIMARY KEY (`num`)
) PARTITION BY HASH (num)
PARTITIONS 8 as select * from t;
let $s = `select to_seconds(now())`;
) PARTITION BY HASH (num) PARTITIONS 8;
# put 1M rows into t1
INSERT INTO t1 values (null,null);
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
INSERT INTO t1 SELECT null,val FROM t1;
SELECT count(*) FROM t1;
let $maxq = 20;
set tokudb_bulk_fetch=ON;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxq) {
CREATE TABLE `t3` (`x` bigint);
SELECT count(*) from t1;
DROP TABLE t3;
CREATE TABLE t2 AS SELECT count(*) from t1;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_select = `select to_seconds(now()) - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_select seconds.;
let $time_elapsed_on = `select unix_timestamp() - $s`;
let $s = `select to_seconds(now())`;
set tokudb_bulk_fetch=OFF;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxq) {
CREATE TABLE t3 AS SELECT count(*) from t2;
DROP TABLE t3;
CREATE TABLE t2 AS SELECT count(*) from t1;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
let $time_elapsed_create_select = `select to_seconds(now()) - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_create_select seconds.;
# This check evaluates whether the time elapsed during the create select statement is on par
# with the select statement, which will confirm that bulk fetch is in fact being used.
# Additionally, it is important to note that 1.5 is the multiplier applied to the time_elapsed_select
# value because it appears that MySQL 5.5.39 uses a sorted index scan during the create select statement
# while Percona Server 5.6 uses an unsorted index scan.
# The issue has been resolved in MySQL 5.6 but still persists in Maria 10.0.12
# in the defect found at https://mariadb.atlassian.net/browse/MDEV-6547.
let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= 1.5 * $time_elapsed_select`;
# check that bulk fetch on is at least 1.5 times faster than bulk fetch off
let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 1.5 * $time_elapsed_on`;
echo $verdict;
if (!$verdict) { echo index scan t2 $time_elapsed_create_select $time_elapsed_select; }
if ($debug) { echo index $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo index $time_elapsed_off $time_elapsed_on; }
let $maxrq = 30;
let $maxq = 20;
let $s = `select to_seconds(now())`;
set tokudb_bulk_fetch=ON;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxrq) {
CREATE TABLE `t3` (`x` bigint);
SELECT count(*) from t1 where num > 7000000;
DROP TABLE t3;
while ($i < $maxq) {
CREATE TABLE t2 AS SELECT count(*) from t1 where num > 700000;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_select = `select to_seconds(now()) - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_select seconds.;
let $time_elapsed_on = `select unix_timestamp() - $s`;
let $s = `select to_seconds(now())`;
set tokudb_bulk_fetch=OFF;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxrq) {
CREATE TABLE t3 AS SELECT count(*) from t2 where num > 7000000;
DROP TABLE t3;
while ($i < $maxq) {
CREATE TABLE t2 AS SELECT count(*) from t1 where num > 700000;
DROP TABLE t2;
inc $i;
}
let $time_elapsed_off = `select unix_timestamp() - $s`;
let $time_elapsed_create_select = `select to_seconds(now()) - $s`;
# The following line can be used to display the time elapsed data
# which could be useful for debugging.
#echo Index scans took $time_elapsed_create_select seconds.;
# This check evaluates whether the time elapsed during the create select statement is on par
# with the select statement, which will confirm that bulk fetch is in fact being used.
# Additionally, it is important to note that 1.5 is the multiplier applied to the time_elapsed_select
# value because it appears that MySQL 5.5.39 uses a sorted index scan during the create select statement
# while Percona Server 5.6 uses an unsorted index scan.
# The issue has been resolved in MySQL 5.6 but still persists in Maria 10.0.12
# in the defect found at https://mariadb.atlassian.net/browse/MDEV-6547.
let $verdict = `select abs($time_elapsed_create_select - $time_elapsed_select) <= 1.5 * $time_elapsed_select`;
# check that bulk fetch on is at least 1.5 times faster than bulk fetch off
let $verdict = `select $time_elapsed_on > 0 && $time_elapsed_off >= 1.5 * $time_elapsed_on`;
echo $verdict;
if (!$verdict) { echo range scan t2 $time_elapsed_create_select $time_elapsed_select; }
if ($debug) { echo range $verdict $time_elapsed_off $time_elapsed_on; }
if (!$verdict) { echo range $time_elapsed_off $time_elapsed_on; }
drop table t,t1,t2;
drop table t1;
......@@ -8,6 +8,8 @@ disable_warnings;
drop table if exists t;
enable_warnings;
let $debug = 0;
CREATE TABLE `t` (id bigint not null auto_increment primary key, val bigint not null default 0);
# put 8M rows into t
......@@ -42,27 +44,28 @@ let $maxq = 10;
# measure the time to do $maxq deletes from t that affect no rows with bulk fetch ON
set tokudb_bulk_fetch = ON;
let $s = `select to_seconds(now())`;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxq) {
delete from t where val > 0;
inc $i;
}
let $time_elapsed_bf_on = `select to_seconds(now()) - $s`;
let $time_elapsed_bf_on = `select unix_timestamp() - $s`;
# measure the time to do $maxq deletes from t that affect no rows with bulk fetch OFF
set tokudb_bulk_fetch = OFF;
let $s = `select to_seconds(now())`;
let $s = `select unix_timestamp()`;
let $i = 0;
while ($i < $maxq) {
delete from t where val > 0;
inc $i;
}
let $time_elapsed_bf_off = `select to_seconds(now()) - $s`;
let $time_elapsed_bf_off = `select unix_timestamp() - $s`;
# verify that a delete scan with bulk fetch ON is at least 2 times faster than with bulk fetch OFF
let $verdict = `select $time_elapsed_bf_off > $time_elapsed_bf_on && ($time_elapsed_bf_off - $time_elapsed_bf_on) / $time_elapsed_bf_on >= 2`;
let $verdict = `select $time_elapsed_bf_on > 0 && $time_elapsed_bf_off >= 2 * $time_elapsed_bf_on`;
echo $verdict;
if (!$verdict) { echo $time_elapsed_bf_on $time_elapsed_bf_off; }
if ($debug) { echo range $verdict $time_elapsed_bf_on $time_elapsed_bf_off; }
if (!$verdict) { echo range $time_elapsed_bf_on $time_elapsed_bf_off; }
drop table t;
......@@ -66,9 +66,10 @@ let $time_bf_off = `select unix_timestamp() - $s`;
if ($debug) { echo index scans took $time_bf_off.; }
# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
let $verdict = `select $time_bf_off > $time_bf_on && $time_bf_off >= 1.5 * $time_bf_on`;
let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
echo $verdict;
if (!$verdict) { echo index scan $time_bf_on $time_bf_off; }
if ($debug) { echo index $verdict $time_bf_on $time_bf_off; }
if (!$verdict) { echo index $time_bf_on $time_bf_off; }
set tokudb_bulk_fetch=ON;
let $s = `select unix_timestamp()`;
......@@ -93,8 +94,9 @@ let $time_bf_off = `select unix_timestamp() - $s`;
if ($debug) { echo range scans took $time_bf_off.; }
# check that the scan time with bulk fetch off is at least 1.5 times as long as with bulk fetch on
let $verdict = `select $time_bf_off > $time_bf_on && $time_bf_off >= 1.5 * $time_bf_on`;
let $verdict = `select $time_bf_on > 0 && $time_bf_off >= 1.5 * $time_bf_on`;
echo $verdict;
if (!$verdict) { echo range scan $time_bf_on $time_bf_off; }
if ($debug) { echo range $verdict $time_bf_on $time_bf_off; }
if (!$verdict) { echo range $time_bf_on $time_bf_off; }
drop table t;
drop table if exists t1,t2;
CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
CREATE TABLE t2(`a` INT) ENGINE=InnoDB;
begin;
insert into t1 values (0);
insert into t2 values (0);
commit;
begin;
insert into t1 values (1);
insert into t2 values (1);
commit;
include/diff_tables.inc [test.t1, test.t2]
drop table t1,t2;
......@@ -4,6 +4,6 @@ create table t (id int primary key);
begin;
insert into t values (1),(2);
select * from information_schema.tokudb_fractal_tree_info;
ERROR HY000: Unknown error -30994
ERROR HY000: Got error -30994 from storage engine
commit;
drop table t;
source include/have_tokudb.inc;
source include/have_innodb.inc;
disable_warnings;
drop table if exists t1,t2;
enable_warnings;
CREATE TABLE t1(`a` INT) ENGINE=TokuDB;
CREATE TABLE t2(`a` INT) ENGINE=InnoDB;
let $n=0;
while ($n < 2) {
begin;
eval insert into t1 values ($n);
eval insert into t2 values ($n);
commit;
inc $n;
}
let $diff_tables= test.t1, test.t2;
source include/diff_tables.inc;
drop table t1,t2;