Commit 4cdae9c1 authored by Vicențiu Ciorbaru's avatar Vicențiu Ciorbaru

Merge branch 'merge-tokudb-5.6' into 10.0

parents c1b3aaa2 97c53cdf
SET(TOKUDB_VERSION 5.6.35-80.0)
SET(TOKUDB_VERSION 5.6.36-82.0)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_VERSION VERSION_LESS "2.8.9")
MESSAGE(STATUS "CMake 2.8.9 or higher is required by TokuDB")
......@@ -140,7 +140,8 @@ SET(TOKUDB_SOURCES
tokudb_background.cc
tokudb_information_schema.cc
tokudb_sysvars.cc
tokudb_thread.cc)
tokudb_thread.cc
tokudb_dir_cmd.cc)
MYSQL_ADD_PLUGIN(tokudb ${TOKUDB_SOURCES} STORAGE_ENGINE MODULE_ONLY
LINK_LIBRARIES tokufractaltree_static tokuportability_static ${ZLIB_LIBRARY} stdc++)
SET(CMAKE_MODULE_LINKER_FLAGS_RELEASE "${CMAKE_MODULE_LINKER_FLAGS_RELEASE} -flto -fuse-linker-plugin")
......
......@@ -425,6 +425,9 @@ static void print_db_env_struct (void) {
"bool (*set_dir_per_db)(DB_ENV *, bool new_val)",
"bool (*get_dir_per_db)(DB_ENV *)",
"const char *(*get_data_dir)(DB_ENV *env)",
"int (*dirtool_attach)(DB_ENV *, DB_TXN *, const char *, const char *)",
"int (*dirtool_detach)(DB_ENV *, DB_TXN *, const char *)",
"int (*dirtool_move)(DB_ENV *, DB_TXN *, const char *, const char *)",
NULL};
sort_and_dump_fields("db_env", true, extra);
......
......@@ -3900,25 +3900,34 @@ struct keyrange_compare_s {
};
// TODO: Remove me, I'm boring
static int keyrange_compare(DBT const &kdbt, const struct keyrange_compare_s &s) {
static int keyrange_compare(DBT const &kdbt,
const struct keyrange_compare_s &s) {
return s.ft->cmp(&kdbt, s.key);
}
static void
keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
DBT* key_left, DBT* key_right,
int left_child_number, int right_child_number, uint64_t estimated_num_rows,
uint64_t *less, uint64_t* equal_left, uint64_t* middle,
uint64_t* equal_right, uint64_t* greater, bool* single_basement_node)
static void keysrange_in_leaf_partition(FT_HANDLE ft_handle,
FTNODE node,
DBT *key_left,
DBT *key_right,
int left_child_number,
int right_child_number,
uint64_t estimated_num_rows,
uint64_t *less,
uint64_t *equal_left,
uint64_t *middle,
uint64_t *equal_right,
uint64_t *greater,
bool *single_basement_node)
// If the partition is in main memory then estimate the number
// Treat key_left == NULL as negative infinity
// Treat key_right == NULL as positive infinity
{
paranoid_invariant(node->height == 0); // we are in a leaf
paranoid_invariant(node->height == 0); // we are in a leaf
paranoid_invariant(!(key_left == NULL && key_right != NULL));
paranoid_invariant(left_child_number <= right_child_number);
bool single_basement = left_child_number == right_child_number;
paranoid_invariant(!single_basement || (BP_STATE(node, left_child_number) == PT_AVAIL));
paranoid_invariant(!single_basement ||
(BP_STATE(node, left_child_number) == PT_AVAIL));
if (BP_STATE(node, left_child_number) == PT_AVAIL) {
int r;
// The partition is in main memory then get an exact count.
......@@ -3926,29 +3935,35 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
BASEMENTNODE bn = BLB(node, left_child_number);
uint32_t idx_left = 0;
// if key_left is NULL then set r==-1 and idx==0.
r = key_left ? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>(s_left, nullptr, nullptr, nullptr, &idx_left) : -1;
r = key_left
? bn->data_buffer.find_zero<decltype(s_left), keyrange_compare>(
s_left, nullptr, nullptr, nullptr, &idx_left)
: -1;
*less = idx_left;
*equal_left = (r==0) ? 1 : 0;
*equal_left = (r == 0) ? 1 : 0;
uint32_t size = bn->data_buffer.num_klpairs();
uint32_t idx_right = size;
r = -1;
if (single_basement && key_right) {
struct keyrange_compare_s s_right = {ft_handle->ft, key_right};
r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(s_right, nullptr, nullptr, nullptr, &idx_right);
r = bn->data_buffer.find_zero<decltype(s_right), keyrange_compare>(
s_right, nullptr, nullptr, nullptr, &idx_right);
}
*middle = idx_right - idx_left - *equal_left;
*equal_right = (r==0) ? 1 : 0;
*equal_right = (r == 0) ? 1 : 0;
*greater = size - idx_right - *equal_right;
} else {
paranoid_invariant(!single_basement);
uint32_t idx_left = estimated_num_rows / 2;
if (!key_left) {
//Both nullptr, assume key_left belongs before leftmost entry, key_right belongs after rightmost entry
// Both nullptr, assume key_left belongs before leftmost entry,
// key_right belongs after rightmost entry
idx_left = 0;
paranoid_invariant(!key_right);
}
// Assume idx_left and idx_right point to where key_left and key_right belong, (but are not there).
// Assume idx_left and idx_right point to where key_left and key_right
// belong, (but are not there).
*less = idx_left;
*equal_left = 0;
*middle = estimated_num_rows - idx_left;
......@@ -3958,44 +3973,76 @@ keysrange_in_leaf_partition (FT_HANDLE ft_handle, FTNODE node,
*single_basement_node = single_basement;
}
static int
toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
DBT* key_left, DBT* key_right, bool may_find_right,
uint64_t* less, uint64_t* equal_left, uint64_t* middle,
uint64_t* equal_right, uint64_t* greater, bool* single_basement_node,
uint64_t estimated_num_rows,
ftnode_fetch_extra *min_bfe, // set up to read a minimal read.
ftnode_fetch_extra *match_bfe, // set up to read a basement node iff both keys in it
struct unlockers *unlockers, ANCESTORS ancestors, const pivot_bounds &bounds)
// Implementation note: Assign values to less, equal, and greater, and then on the way out (returning up the stack) we add more values in.
static int toku_ft_keysrange_internal(
FT_HANDLE ft_handle,
FTNODE node,
DBT *key_left,
DBT *key_right,
bool may_find_right,
uint64_t *less,
uint64_t *equal_left,
uint64_t *middle,
uint64_t *equal_right,
uint64_t *greater,
bool *single_basement_node,
uint64_t estimated_num_rows,
ftnode_fetch_extra *min_bfe, // set up to read a minimal read.
ftnode_fetch_extra
*match_bfe, // set up to read a basement node iff both keys in it
struct unlockers *unlockers,
ANCESTORS ancestors,
const pivot_bounds &bounds)
// Implementation note: Assign values to less, equal, and greater, and then on
// the way out (returning up the stack) we add more values in.
{
int r = 0;
// if KEY is NULL then use the leftmost key.
int left_child_number = key_left ? toku_ftnode_which_child (node, key_left, ft_handle->ft->cmp) : 0;
int right_child_number = node->n_children; // Sentinel that does not equal left_child_number.
int left_child_number =
key_left ? toku_ftnode_which_child(node, key_left, ft_handle->ft->cmp)
: 0;
int right_child_number =
node->n_children; // Sentinel that does not equal left_child_number.
if (may_find_right) {
right_child_number = key_right ? toku_ftnode_which_child (node, key_right, ft_handle->ft->cmp) : node->n_children - 1;
right_child_number =
key_right
? toku_ftnode_which_child(node, key_right, ft_handle->ft->cmp)
: node->n_children - 1;
}
uint64_t rows_per_child = estimated_num_rows / node->n_children;
if (node->height == 0) {
keysrange_in_leaf_partition(ft_handle, node, key_left, key_right, left_child_number, right_child_number,
rows_per_child, less, equal_left, middle, equal_right, greater, single_basement_node);
*less += rows_per_child * left_child_number;
keysrange_in_leaf_partition(ft_handle,
node,
key_left,
key_right,
left_child_number,
right_child_number,
rows_per_child,
less,
equal_left,
middle,
equal_right,
greater,
single_basement_node);
*less += rows_per_child * left_child_number;
if (*single_basement_node) {
*greater += rows_per_child * (node->n_children - left_child_number - 1);
*greater +=
rows_per_child * (node->n_children - left_child_number - 1);
} else {
*middle += rows_per_child * (node->n_children - left_child_number - 1);
*middle +=
rows_per_child * (node->n_children - left_child_number - 1);
}
} else {
// do the child.
struct ancestors next_ancestors = {node, left_child_number, ancestors};
BLOCKNUM childblocknum = BP_BLOCKNUM(node, left_child_number);
uint32_t fullhash = compute_child_fullhash(ft_handle->ft->cf, node, left_child_number);
uint32_t fullhash =
compute_child_fullhash(ft_handle->ft->cf, node, left_child_number);
FTNODE childnode;
bool msgs_applied = false;
bool child_may_find_right = may_find_right && left_child_number == right_child_number;
bool child_may_find_right =
may_find_right && left_child_number == right_child_number;
r = toku_pin_ftnode_for_query(
ft_handle,
childblocknum,
......@@ -4006,27 +4053,45 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
child_may_find_right ? match_bfe : min_bfe,
false,
&childnode,
&msgs_applied
);
&msgs_applied);
paranoid_invariant(!msgs_applied);
if (r != TOKUDB_TRY_AGAIN) {
assert_zero(r);
struct unlock_ftnode_extra unlock_extra = {ft_handle,childnode,false};
struct unlockers next_unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, unlockers};
const pivot_bounds next_bounds = bounds.next_bounds(node, left_child_number);
r = toku_ft_keysrange_internal(ft_handle, childnode, key_left, key_right, child_may_find_right,
less, equal_left, middle, equal_right, greater, single_basement_node,
rows_per_child, min_bfe, match_bfe, &next_unlockers, &next_ancestors, next_bounds);
struct unlock_ftnode_extra unlock_extra = {
ft_handle, childnode, false};
struct unlockers next_unlockers = {
true, unlock_ftnode_fun, (void *)&unlock_extra, unlockers};
const pivot_bounds next_bounds =
bounds.next_bounds(node, left_child_number);
r = toku_ft_keysrange_internal(ft_handle,
childnode,
key_left,
key_right,
child_may_find_right,
less,
equal_left,
middle,
equal_right,
greater,
single_basement_node,
rows_per_child,
min_bfe,
match_bfe,
&next_unlockers,
&next_ancestors,
next_bounds);
if (r != TOKUDB_TRY_AGAIN) {
assert_zero(r);
*less += rows_per_child * left_child_number;
*less += rows_per_child * left_child_number;
if (*single_basement_node) {
*greater += rows_per_child * (node->n_children - left_child_number - 1);
*greater += rows_per_child *
(node->n_children - left_child_number - 1);
} else {
*middle += rows_per_child * (node->n_children - left_child_number - 1);
*middle += rows_per_child *
(node->n_children - left_child_number - 1);
}
assert(unlockers->locked);
......@@ -4037,10 +4102,21 @@ toku_ft_keysrange_internal (FT_HANDLE ft_handle, FTNODE node,
return r;
}
void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint64_t *less_p, uint64_t* equal_left_p, uint64_t* middle_p, uint64_t* equal_right_p, uint64_t* greater_p, bool* middle_3_exact_p)
// Effect: Return an estimate of the number of keys to the left, the number equal (to left key), number between keys, number equal to right key, and the number to the right of both keys.
void toku_ft_keysrange(FT_HANDLE ft_handle,
DBT *key_left,
DBT *key_right,
uint64_t *less_p,
uint64_t *equal_left_p,
uint64_t *middle_p,
uint64_t *equal_right_p,
uint64_t *greater_p,
bool *middle_3_exact_p)
// Effect: Return an estimate of the number of keys to the left, the number
// equal (to left key), number between keys, number equal to right key, and the
// number to the right of both keys.
// The values are an estimate.
// If you perform a keyrange on two keys that are in the same basement, equal_less, middle, and equal_right will be exact.
// If you perform a keyrange on two keys that are in the same basement,
// equal_less, middle, and equal_right will be exact.
// 4184: What to do with a NULL key?
// key_left==NULL is treated as -infinity
// key_right==NULL is treated as +infinity
......@@ -4048,10 +4124,21 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6
// key_right can be non-null only if key_left is non-null;
{
if (!key_left && key_right) {
// Simplify internals by only supporting key_right != null when key_left != null
// If key_right != null and key_left == null, then swap them and fix up numbers.
uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
toku_ft_keysrange(ft_handle, key_right, nullptr, &less, &equal_left, &middle, &equal_right, &greater, middle_3_exact_p);
// Simplify internals by only supporting key_right != null when key_left
// != null
// If key_right != null and key_left == null, then swap them and fix up
// numbers.
uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0,
greater = 0;
toku_ft_keysrange(ft_handle,
key_right,
nullptr,
&less,
&equal_left,
&middle,
&equal_right,
&greater,
middle_3_exact_p);
*less_p = 0;
*equal_left_p = 0;
*middle_p = less;
......@@ -4064,98 +4151,132 @@ void toku_ft_keysrange(FT_HANDLE ft_handle, DBT* key_left, DBT* key_right, uint6
paranoid_invariant(!(!key_left && key_right));
ftnode_fetch_extra min_bfe;
ftnode_fetch_extra match_bfe;
min_bfe.create_for_min_read(ft_handle->ft); // read pivot keys but not message buffers
match_bfe.create_for_keymatch(ft_handle->ft, key_left, key_right, false, false); // read basement node only if both keys in it.
try_again:
min_bfe.create_for_min_read(
ft_handle->ft); // read pivot keys but not message buffers
match_bfe.create_for_keymatch(
ft_handle->ft,
key_left,
key_right,
false,
false); // read basement node only if both keys in it.
try_again : {
uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
bool single_basement_node = false;
FTNODE node = NULL;
{
uint64_t less = 0, equal_left = 0, middle = 0, equal_right = 0, greater = 0;
bool single_basement_node = false;
FTNODE node = NULL;
{
uint32_t fullhash;
CACHEKEY root_key;
toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
toku_pin_ftnode(
ft_handle->ft,
root_key,
fullhash,
&match_bfe,
PL_READ, // may_modify_node, cannot change root during keyrange
&node,
true
);
}
uint32_t fullhash;
CACHEKEY root_key;
toku_calculate_root_offset_pointer(ft_handle->ft, &root_key, &fullhash);
toku_pin_ftnode(
ft_handle->ft,
root_key,
fullhash,
&match_bfe,
PL_READ, // may_modify_node, cannot change root during keyrange
&node,
true);
}
struct unlock_ftnode_extra unlock_extra = {ft_handle,node,false};
struct unlockers unlockers = {true, unlock_ftnode_fun, (void*)&unlock_extra, (UNLOCKERS)NULL};
struct unlock_ftnode_extra unlock_extra = {ft_handle, node, false};
struct unlockers unlockers = {
true, unlock_ftnode_fun, (void *)&unlock_extra, (UNLOCKERS)NULL};
{
int r;
int64_t numrows = ft_handle->ft->in_memory_stats.numrows;
if (numrows < 0)
numrows = 0; // prevent appearance of a negative number
r = toku_ft_keysrange_internal (ft_handle, node, key_left, key_right, true,
&less, &equal_left, &middle, &equal_right, &greater,
&single_basement_node, numrows,
&min_bfe, &match_bfe, &unlockers, (ANCESTORS)NULL, pivot_bounds::infinite_bounds());
{
int r;
int64_t numrows = ft_handle->ft->in_memory_logical_rows;
if (numrows < 0)
numrows = 0; // prevent appearance of a negative number
r = toku_ft_keysrange_internal(ft_handle,
node,
key_left,
key_right,
true,
&less,
&equal_left,
&middle,
&equal_right,
&greater,
&single_basement_node,
numrows,
&min_bfe,
&match_bfe,
&unlockers,
(ANCESTORS)NULL,
pivot_bounds::infinite_bounds());
assert(r == 0 || r == TOKUDB_TRY_AGAIN);
if (r == TOKUDB_TRY_AGAIN) {
assert(!unlockers.locked);
goto try_again;
}
// May need to do a second query.
if (!single_basement_node && key_right != nullptr) {
// "greater" is stored in "middle"
invariant_zero(equal_right);
invariant_zero(greater);
uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0,
greater2 = 0;
bool ignore;
r = toku_ft_keysrange_internal(ft_handle,
node,
key_right,
nullptr,
false,
&less2,
&equal_left2,
&middle2,
&equal_right2,
&greater2,
&ignore,
numrows,
&min_bfe,
&match_bfe,
&unlockers,
(ANCESTORS) nullptr,
pivot_bounds::infinite_bounds());
assert(r == 0 || r == TOKUDB_TRY_AGAIN);
if (r == TOKUDB_TRY_AGAIN) {
assert(!unlockers.locked);
goto try_again;
}
// May need to do a second query.
if (!single_basement_node && key_right != nullptr) {
// "greater" is stored in "middle"
invariant_zero(equal_right);
invariant_zero(greater);
uint64_t less2 = 0, equal_left2 = 0, middle2 = 0, equal_right2 = 0, greater2 = 0;
bool ignore;
r = toku_ft_keysrange_internal (ft_handle, node, key_right, nullptr, false,
&less2, &equal_left2, &middle2, &equal_right2, &greater2,
&ignore, numrows,
&min_bfe, &match_bfe, &unlockers, (ANCESTORS)nullptr, pivot_bounds::infinite_bounds());
assert(r == 0 || r == TOKUDB_TRY_AGAIN);
if (r == TOKUDB_TRY_AGAIN) {
assert(!unlockers.locked);
goto try_again;
}
invariant_zero(equal_right2);
invariant_zero(greater2);
// Update numbers.
// less is already correct.
// equal_left is already correct.
// "middle" currently holds everything greater than left_key in first query
// 'middle2' currently holds everything greater than right_key in second query
// 'equal_left2' is how many match right_key
// Prevent underflow.
if (middle >= equal_left2 + middle2) {
middle -= equal_left2 + middle2;
} else {
middle = 0;
}
equal_right = equal_left2;
greater = middle2;
invariant_zero(equal_right2);
invariant_zero(greater2);
// Update numbers.
// less is already correct.
// equal_left is already correct.
// "middle" currently holds everything greater than left_key in
// first query
// 'middle2' currently holds everything greater than right_key in
// second query
// 'equal_left2' is how many match right_key
// Prevent underflow.
if (middle >= equal_left2 + middle2) {
middle -= equal_left2 + middle2;
} else {
middle = 0;
}
equal_right = equal_left2;
greater = middle2;
}
assert(unlockers.locked);
toku_unpin_ftnode_read_only(ft_handle->ft, node);
if (!key_right) {
paranoid_invariant_zero(equal_right);
paranoid_invariant_zero(greater);
}
if (!key_left) {
paranoid_invariant_zero(less);
paranoid_invariant_zero(equal_left);
}
*less_p = less;
*equal_left_p = equal_left;
*middle_p = middle;
*equal_right_p = equal_right;
*greater_p = greater;
*middle_3_exact_p = single_basement_node;
}
assert(unlockers.locked);
toku_unpin_ftnode_read_only(ft_handle->ft, node);
if (!key_right) {
paranoid_invariant_zero(equal_right);
paranoid_invariant_zero(greater);
}
if (!key_left) {
paranoid_invariant_zero(less);
paranoid_invariant_zero(equal_left);
}
*less_p = less;
*equal_left_p = equal_left;
*middle_p = middle;
*equal_right_p = equal_right;
*greater_p = greater;
*middle_3_exact_p = single_basement_node;
}
}
struct get_key_after_bytes_iterate_extra {
......
......@@ -70,6 +70,8 @@ const char *toku_copyright_string = "Copyright (c) 2006, 2015, Percona and/or it
#include "util/status.h"
#include "util/context.h"
#include <functional>
// Include ydb_lib.cc here so that its constructor/destructor gets put into
// ydb.o, to make sure they don't get erased at link time (when linking to
// a static libtokufractaltree.a that was compiled with gcc). See #5094.
......@@ -1314,6 +1316,159 @@ static const char *env_get_data_dir(DB_ENV *env) {
return env->i->real_data_dir;
}
static int env_dirtool_attach(DB_ENV *env,
DB_TXN *txn,
const char *dname,
const char *iname) {
int r;
DBT dname_dbt;
DBT iname_dbt;
HANDLE_PANICKED_ENV(env);
if (!env_opened(env)) {
return EINVAL;
}
HANDLE_READ_ONLY_TXN(txn);
toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
toku_fill_dbt(&iname_dbt, iname, strlen(iname) + 1);
r = toku_db_put(env->i->directory,
txn,
&dname_dbt,
&iname_dbt,
0,
true);
return r;
}
static int env_dirtool_detach(DB_ENV *env,
DB_TXN *txn,
const char *dname) {
int r;
DBT dname_dbt;
DBT old_iname_dbt;
HANDLE_PANICKED_ENV(env);
if (!env_opened(env)) {
return EINVAL;
}
HANDLE_READ_ONLY_TXN(txn);
toku_fill_dbt(&dname_dbt, dname, strlen(dname) + 1);
toku_init_dbt_flags(&old_iname_dbt, DB_DBT_REALLOC);
r = toku_db_get(env->i->directory,
txn,
&dname_dbt,
&old_iname_dbt,
DB_SERIALIZABLE); // allocates memory for iname
if (r == DB_NOTFOUND)
return EEXIST;
toku_free(old_iname_dbt.data);
r = toku_db_del(env->i->directory, txn, &dname_dbt, DB_DELETE_ANY, true);
return r;
}
static int env_dirtool_move(DB_ENV *env,
DB_TXN *txn,
const char *old_dname,
const char *new_dname) {
int r;
DBT old_dname_dbt;
DBT new_dname_dbt;
DBT iname_dbt;
HANDLE_PANICKED_ENV(env);
if (!env_opened(env)) {
return EINVAL;
}
HANDLE_READ_ONLY_TXN(txn);
toku_fill_dbt(&old_dname_dbt, old_dname, strlen(old_dname) + 1);
toku_fill_dbt(&new_dname_dbt, new_dname, strlen(new_dname) + 1);
toku_init_dbt_flags(&iname_dbt, DB_DBT_REALLOC);
r = toku_db_get(env->i->directory,
txn,
&old_dname_dbt,
&iname_dbt,
DB_SERIALIZABLE); // allocates memory for iname
if (r == DB_NOTFOUND)
return EEXIST;
r = toku_db_del(
env->i->directory, txn, &old_dname_dbt, DB_DELETE_ANY, true);
if (r != 0)
goto exit;
r = toku_db_put(
env->i->directory, txn, &new_dname_dbt, &iname_dbt, 0, true);
exit:
toku_free(iname_dbt.data);
return r;
}
static int locked_env_op(DB_ENV *env,
DB_TXN *txn,
std::function<int(DB_TXN *)> f) {
int ret, r;
HANDLE_READ_ONLY_TXN(txn);
HANDLE_ILLEGAL_WORKING_PARENT_TXN(env, txn);
DB_TXN *child_txn = NULL;
int using_txns = env->i->open_flags & DB_INIT_TXN;
if (using_txns) {
ret = toku_txn_begin(env, txn, &child_txn, 0);
lazy_assert_zero(ret);
}
// cannot begin a checkpoint
toku_multi_operation_client_lock();
r = f(child_txn);
toku_multi_operation_client_unlock();
if (using_txns) {
if (r == 0) {
ret = locked_txn_commit(child_txn, 0);
lazy_assert_zero(ret);
} else {
ret = locked_txn_abort(child_txn);
lazy_assert_zero(ret);
}
}
return r;
}
static int locked_env_dirtool_attach(DB_ENV *env,
DB_TXN *txn,
const char *dname,
const char *iname) {
auto f = std::bind(
env_dirtool_attach, env, std::placeholders::_1, dname, iname);
return locked_env_op(env, txn, f);
}
static int locked_env_dirtool_detach(DB_ENV *env,
DB_TXN *txn,
const char *dname) {
auto f = std::bind(
env_dirtool_detach, env, std::placeholders::_1, dname);
return locked_env_op(env, txn, f);
}
static int locked_env_dirtool_move(DB_ENV *env,
DB_TXN *txn,
const char *old_dname,
const char *new_dname) {
auto f = std::bind(
env_dirtool_move, env, std::placeholders::_1, old_dname, new_dname);
return locked_env_op(env, txn, f);
}
static int env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, uint32_t flags);
static int
......@@ -2646,6 +2801,9 @@ toku_env_create(DB_ENV ** envp, uint32_t flags) {
#define SENV(name) result->name = locked_env_ ## name
SENV(dbremove);
SENV(dbrename);
SENV(dirtool_attach);
SENV(dirtool_detach);
SENV(dirtool_move);
//SENV(set_noticecall);
#undef SENV
#define USENV(name) result->name = env_ ## name
......@@ -2975,8 +3133,10 @@ env_dbremove(DB_ENV * env, DB_TXN *txn, const char *fname, const char *dbname, u
if (txn && r) {
if (r == EMFILE || r == ENFILE)
r = toku_ydb_do_error(env, r, "toku dbremove failed because open file limit reached\n");
else
else if (r != ENOENT)
r = toku_ydb_do_error(env, r, "toku dbremove failed\n");
else
r = 0;
goto exit;
}
if (txn) {
......
......@@ -5255,17 +5255,17 @@ int ha_tokudb::fill_range_query_buf(
DEBUG_SYNC(ha_thd(), "tokudb_icp_asc_scan_out_of_range");
goto cleanup;
} else if (result == ICP_NO_MATCH) {
// if we are performing a DESC ICP scan and have no end_range
// to compare to stop using ICP filtering as there isn't much more
// that we can do without going through contortions with remembering
// and comparing key parts.
// Optimizer change for MyRocks also benefits us here in TokuDB as
// opt_range.cc QUICK_SELECT::get_next now sets end_range during
// descending scan. We should not ever hit this condition, but
// leaving this code in to prevent any possibility of a descending
// scan to the beginning of an index and catch any possibility
// in debug builds with an assertion
assert_debug(!(!end_range && direction < 0));
if (!end_range &&
direction < 0) {
cancel_pushed_idx_cond();
DEBUG_SYNC(ha_thd(), "tokudb_icp_desc_scan_invalidate");
}
error = TOKUDB_CURSOR_CONTINUE;
goto cleanup;
}
......@@ -6123,7 +6123,6 @@ int ha_tokudb::info(uint flag) {
stats.records = share->row_count() + share->rows_from_locked_table;
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
uint64_t num_rows = 0;
error = txn_begin(db_env, NULL, &txn, DB_READ_UNCOMMITTED, ha_thd());
if (error) {
......@@ -6133,20 +6132,13 @@ int ha_tokudb::info(uint flag) {
// we should always have a primary key
assert_always(share->file != NULL);
error = estimate_num_rows(share->file, &num_rows, txn);
if (error == 0) {
share->set_row_count(num_rows, false);
stats.records = num_rows;
} else {
goto cleanup;
}
DB_BTREE_STAT64 dict_stats;
error = share->file->stat64(share->file, txn, &dict_stats);
if (error) {
goto cleanup;
}
share->set_row_count(dict_stats.bt_ndata, false);
stats.records = dict_stats.bt_ndata;
stats.create_time = dict_stats.bt_create_time_sec;
stats.update_time = dict_stats.bt_modify_time_sec;
stats.check_time = dict_stats.bt_verify_time_sec;
......@@ -7849,7 +7841,7 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range*
// As a result, equal may be 0 and greater may actually be equal+greater
// So, we call key_range64 on the key, and the key that is after it.
if (!start_key && !end_key) {
error = estimate_num_rows(kfile, &rows, transaction);
error = estimate_num_rows(share->file, &rows, transaction);
if (error) {
ret_val = HA_TOKUDB_RANGE_COUNT;
goto cleanup;
......
SET GLOBAL tokudb_dir_per_db=ON;
CREATE PROCEDURE create_table()
BEGIN
CREATE TABLE test.t1 (
a INT
) ENGINE = TokuDB
PARTITION BY RANGE (a)
(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB,
PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB,
PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB,
PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB
);
END|
### Create partitioned table
CALL create_table();
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
t1_P_p100_main_id.tokudb
t1_P_p100_status_id.tokudb
t1_P_p300_main_id.tokudb
t1_P_p300_status_id.tokudb
t1_P_p400_main_id.tokudb
t1_P_p400_status_id.tokudb
t1_P_p_to_del_main_id.tokudb
t1_P_p_to_del_status_id.tokudb
### Stop server
### Remove 'main' file of one of the partitions
### Start server
### Make sure 'main' partition file is deleted
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
t1_P_p100_main_id.tokudb
t1_P_p100_status_id.tokudb
t1_P_p300_main_id.tokudb
t1_P_p300_status_id.tokudb
t1_P_p400_main_id.tokudb
t1_P_p400_status_id.tokudb
t1_P_p_to_del_status_id.tokudb
### Make sure the table still exists
SHOW TABLES;
Tables_in_test
t1
### Drop table
DROP TABLE t1;
### Make sure the table is dropped
SHOW TABLES;
Tables_in_test
### Check what files still exist after DROP TABLE
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
### Remove the rest of the files
### Make sure there are no tokudb files
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
### Create the same table once more
CALL create_table();
## Looking for *.tokudb files in data_dir
## Looking for *.tokudb files in data_dir/test
t1_P_p100_main_id.tokudb
t1_P_p100_status_id.tokudb
t1_P_p300_main_id.tokudb
t1_P_p300_status_id.tokudb
t1_P_p400_main_id.tokudb
t1_P_p400_status_id.tokudb
t1_P_p_to_del_main_id.tokudb
t1_P_p_to_del_status_id.tokudb
### Restore state
DROP TABLE t1;
DROP PROCEDURE create_table;
SET GLOBAL tokudb_dir_per_db=default;
SET GLOBAL tokudb_dir_per_db = ON;
SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1";
SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
dictionary_name internal_file_name
test_dname_1 test_iname_1
test_dname_2 test_iname_2
SET tokudb_dir_cmd = "detach test_dname_1";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
dictionary_name internal_file_name
test_dname_2 test_iname_2
SET tokudb_dir_cmd = "move test_dname_2 test_dname_3";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
dictionary_name internal_file_name
test_dname_3 test_iname_2
SET tokudb_dir_cmd = "detach test_dname_3";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
dictionary_name internal_file_name
CREATE TABLE t1(a int) ENGINE=tokudb;
INSERT INTO t1 (a) VALUES (10);
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
dictionary_name internal_file_name
./test/t1-main ./test/t1_main_id.tokudb
./test/t1-status ./test/t1_status_id.tokudb
SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
dictionary_name internal_file_name
./test/t1-main test/t1-main-renamed.tokudb
./test/t1-status ./test/t1_status_id.tokudb
### rename t1_main_id.tokudb to t1-main-renamed.tokudb
SELECT * FROM t1;
a
10
### Test for errors notification
SET tokudb_dir_cmd = "detach foo";
ERROR 42000: Variable 'tokudb_dir_cmd' can't be set to the value of 'detach foo'
SELECT @@tokudb_dir_cmd_last_error;
@@tokudb_dir_cmd_last_error
17
SELECT @@tokudb_dir_cmd_last_error_string;
@@tokudb_dir_cmd_last_error_string
detach command error
SET @@tokudb_dir_cmd_last_error_string = "blablabla";
SELECT @@tokudb_dir_cmd_last_error_string;
@@tokudb_dir_cmd_last_error_string
blablabla
SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR
SELECT @@tokudb_dir_cmd_last_error_string;
@@tokudb_dir_cmd_last_error_string
statement_blablabla
DROP TABLE t1;
SET GLOBAL tokudb_dir_per_db = default;
# See https://bugs.launchpad.net/percona-server/+bug/1657908
source include/have_tokudb.inc;
SET GLOBAL tokudb_dir_per_db=ON;
--let $DB= test
--let $DATADIR= `SELECT @@datadir`
--delimiter |
CREATE PROCEDURE create_table()
BEGIN
CREATE TABLE test.t1 (
a INT
) ENGINE = TokuDB
PARTITION BY RANGE (a)
(PARTITION p100 VALUES LESS THAN (100) ENGINE = TokuDB,
PARTITION p_to_del VALUES LESS THAN (200) ENGINE = TokuDB,
PARTITION p300 VALUES LESS THAN (300) ENGINE = TokuDB,
PARTITION p400 VALUES LESS THAN (400) ENGINE = TokuDB
);
END|
--delimiter ;
--echo ### Create partitioned table
CALL create_table();
--source dir_per_db_show_table_files.inc
--echo ### Stop server
--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--shutdown_server
--source include/wait_until_disconnected.inc
--echo ### Remove 'main' file of one of the partitions
--remove_files_wildcard $DATADIR/$DB t1_P_p_to_del_main_*.tokudb
--echo ### Start server
--enable_reconnect
--exec echo "restart: --loose-tokudb-dir-per-db=ON" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--echo ### Make sure 'main' partition file is deleted
--source dir_per_db_show_table_files.inc
--echo ### Make sure the table still exists
SHOW TABLES;
--echo ### Drop table
# error 1051 was here before the fix
DROP TABLE t1;
--echo ### Make sure the table is dropped
SHOW TABLES;
--echo ### Check what files still exist after DROP TABLE
--source dir_per_db_show_table_files.inc
--echo ### Remove the rest of the files
--remove_files_wildcard $DATADIR/$DB *.tokudb
--echo ### Make sure there are no tokudb files
--source dir_per_db_show_table_files.inc
--echo ### Create the same table once more
# engine error 17 (EEXIST) was here before the fix
CALL create_table();
--source dir_per_db_show_table_files.inc
--echo ### Restore state
DROP TABLE t1;
DROP PROCEDURE create_table;
SET GLOBAL tokudb_dir_per_db=default;
source include/have_tokudb.inc;
--let $MYSQL_DATADIR=`select @@datadir`
SET GLOBAL tokudb_dir_per_db = ON;
SET tokudb_dir_cmd = "attach test_dname_1 test_iname_1";
SET tokudb_dir_cmd = "attach test_dname_2 test_iname_2";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
SET tokudb_dir_cmd = "detach test_dname_1";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
SET tokudb_dir_cmd = "move test_dname_2 test_dname_3";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
SET tokudb_dir_cmd = "detach test_dname_3";
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
CREATE TABLE t1(a int) ENGINE=tokudb;
INSERT INTO t1 (a) VALUES (10);
--source include/table_files_replace_pattern.inc
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
SET tokudb_dir_cmd = "attach ./test/t1-main test/t1-main-renamed.tokudb";
--source include/table_files_replace_pattern.inc
SELECT dictionary_name, internal_file_name
FROM information_schema.TokuDB_file_map;
--echo ### rename t1_main_id.tokudb to t1-main-renamed.tokudb
--exec mv $MYSQL_DATADIR/test/t1_main_*.tokudb $MYSQL_DATADIR/test/t1-main-renamed.tokudb
SELECT * FROM t1;
--echo ### Test for errors notification
--error 1231
SET tokudb_dir_cmd = "detach foo";
SELECT @@tokudb_dir_cmd_last_error;
SELECT @@tokudb_dir_cmd_last_error_string;
SET @@tokudb_dir_cmd_last_error_string = "blablabla";
SELECT @@tokudb_dir_cmd_last_error_string;
SET STATEMENT tokudb_dir_cmd_last_error_string = "statement_blablabla" FOR
SELECT @@tokudb_dir_cmd_last_error_string;
DROP TABLE t1;
SET GLOBAL tokudb_dir_per_db = default;
###
# Test for binlog position
#####
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
Backup
include/filter_file.inc
### tokubackup_slave_info content:
host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name:
###
# Test for gtid set
#####
include/rpl_set_gtid_mode.inc
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
Backup
include/filter_file.inc
### tokubackup_slave_info content:
host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: ####, channel name:
include/rpl_set_gtid_mode.inc
include/rpl_end.inc
### Create backup dir
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
### Check for settings
SELECT @@gtid_mode;
@@gtid_mode
OFF
### Generate some binlog events
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
### Master backup
include/filter_file.inc
### tokubackup_binlog_info content:
filename: ####, position: ####, gtid_mode: OFF, GTID of last change:
### Delete backup dir
### Create backup dir
### GTID-mode on
include/rpl_set_gtid_mode.inc
### Check for settings
SELECT @@gtid_mode;
@@gtid_mode
ON
### Generate some binlog events
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
### Master backup
include/filter_file.inc
### tokubackup_binlog_info content:
filename: ####, position: ####, gtid_mode: ON, GTID of last change: #####
### Delete backup dir
### GTID-mode off
include/rpl_set_gtid_mode.inc
include/rpl_end.inc
SELECT @@innodb_use_native_aio;
@@innodb_use_native_aio
1
SET SESSION tokudb_backup_dir='MYSQL_TMP_DIR/tokudb_backup';
ERROR 42000: Variable 'tokudb_backup_dir' can't be set to the value of 'MYSQL_TMP_DIR/tokudb_backup'
###
# Master-slave test
####
include/rpl_init.inc [topology=1->2]
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
### Create temp table on master
CREATE TEMPORARY TABLE t1 (a INT);
include/sync_slave_sql_with_master.inc
### Setup debug_sync points and prepare for slave backup
SET SESSION debug="+d,debug_sync_abort_on_timeout";
SHOW STATUS LIKE 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue';
### Turn-on safe-slave option
SET GLOBAL tokudb_backup_safe_slave=ON;
SET GLOBAL tokudb_backup_safe_slave_timeout=30;
### Start slave backup
SET SESSION debug="+d,debug_sync_abort_on_timeout";
### Wait for safe slave function to start
SET DEBUG_SYNC = "now WAIT_FOR sse";
SHOW STATUS LIKE 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
### Wait for safe slave loop start
SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb";
SHOW STATUS LIKE 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
### Wait for safe thread loop point just after slave sql thread start 1
SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss";
SHOW STATUS LIKE 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
### Wait for safe thread loop end
SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle";
SHOW STATUS LIKE 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 1
### Wait for safe thread loop point just after slave sql thread start 2
SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss";
### Drop temp table on master
DROP TABLE t1;
### and syncronize slave
include/sync_slave_sql_with_master.inc
SHOW STATUS LIKE 'Slave_open_temp_tables';
Variable_name Value
Slave_open_temp_tables 0
### Continue backup
SET DEBUG_SYNC = "now SIGNAL ttlss_continue";
## Reset debug_sync points
SET DEBUG_SYNC = "RESET";
### Wait for backup finish
include/filter_file.inc
### Slave tokubackup_slave_info content:
host: #.#.#.#, user: ####, port: ####, master log file: ####, relay log file: ####, exec master log pos: ####, executed gtid set: , channel name:
### Delete slave backup dir
### Turn-off safe-slave option for slave
SET GLOBAL tokudb_backup_safe_slave=default;
SET GLOBAL tokudb_backup_safe_slave_timeout=default;
### Turn-on safe-slave option for master
SET GLOBAL tokudb_backup_safe_slave=ON;
SET GLOBAL tokudb_backup_safe_slave_timeout=30;
### Backup master
### Turn-off safe-slave option for master
SET GLOBAL tokudb_backup_safe_slave=default;
SET GLOBAL tokudb_backup_safe_slave_timeout=default;
include/filter_file.inc
### Master tokubackup_binlog_info content:
filename: ####, position: ####, gtid_mode: OFF, GTID of last change:
### Delete master backup dir
include/rpl_end.inc
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
### Create some data on master
CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB;
INSERT INTO t1 SET a=100, b=100;
INSERT INTO t1 SET a=200, b=100;
INSERT INTO t1 SET a=300, b=100;
INSERT INTO t1 SET a=400, b=100;
INSERT INTO t1 SET a=500, b=100;
UPDATE t1 SET b = 200 WHERE a = 200;
DELETE FROM t1 WHERE a = 100;
SELECT * FROM t1;
a b
200 200
300 100
400 100
500 100
### Check for slave options
SELECT @@tokudb_commit_sync;
@@tokudb_commit_sync
0
SELECT @@tokudb_fsync_log_period;
@@tokudb_fsync_log_period
1000000
### Check data on slave after sync
SELECT * FROM t1;
a b
200 200
300 100
400 100
500 100
### Do backup on slave
### Check for errors
SELECT @@session.tokudb_backup_last_error;
@@session.tokudb_backup_last_error
0
SELECT @@session.tokudb_backup_last_error_string;
@@session.tokudb_backup_last_error_string
NULL
### Stop slave server
include/rpl_stop_server.inc [server_number=2]
### Restore backup
### Start slave server and slave threads
include/rpl_start_server.inc [server_number=2]
include/start_slave.inc
### Sync slave with master
### Check data on slave
SELECT * FROM t1;
a b
200 200
300 100
400 100
500 100
### Cleanup
DROP TABLE t1;
include/rpl_end.inc
--source include/have_tokudb_backup.inc
--source include/not_gtid_enabled.inc
--let $SLAVE_INFO_FILE=tokubackup_slave_info
--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave
--let $SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE
--let DDIR=$BACKUP_DIR_SLAVE
# Settings for include/filter_file.inc
--delimiter |
let $script=
s{host: [^,]+,}{host: #.#.#.#,};
s{user: [^,]+,}{user: ####,};
s{port: [^,]+,}{port: ####,};
s{master log file: [^,]+,}{master log file: ####,};
s{relay log file: [^,]+,}{relay log file: ####,};
s{exec master log pos: [^,]+,}{exec master log pos: ####,};
s{executed gtid set: [^,]+, }{executed gtid set: ####, };
s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, };
|
--delimiter ;
--let $input_file = $SLAVE_INFO_FILE_PATH
--let $skip_column_names= 1
--echo ###
--echo # Test for binlog position
--echo #####
--mkdir $BACKUP_DIR_SLAVE
--source include/master-slave.inc
--connection master
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
--sync_slave_with_master
--connection slave
--echo Backup
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
--enable_query_log
--source include/filter_file.inc
--echo ### $SLAVE_INFO_FILE content:
--cat_file $SLAVE_INFO_FILE_PATH
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"DDIR"};
rmtree([ "$DDIR" ]);
EOF
--echo ###
--echo # Test for gtid set
--echo #####
--mkdir $BACKUP_DIR_SLAVE
--let $rpl_server_numbers= 1,2
--let $rpl_set_enforce_gtid_consistency= 1
--source include/rpl_set_gtid_mode.inc
--connection master
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
--sync_slave_with_master
--connection slave
--echo Backup
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
--enable_query_log
--source include/filter_file.inc
--echo ### $SLAVE_INFO_FILE content:
--cat_file $SLAVE_INFO_FILE_PATH
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"DDIR"};
rmtree([ "$DDIR" ]);
EOF
--let $rpl_gtid_mode= OFF
--let $rpl_set_enforce_gtid_consistency= 0
--let $rpl_server_numbers= 1,2
--let $rpl_skip_sync= 1
--source include/rpl_set_gtid_mode.inc
--source include/rpl_end.inc
--source include/have_tokudb_backup.inc
--source include/not_gtid_enabled.inc
--let $MASTER_STATE_FILE=tokubackup_binlog_info
--let $BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master
--let $MASTER_STATE_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_STATE_FILE
--let DDIR=$BACKUP_DIR_MASTER
# Settings for include/filter_file.inc
--delimiter |
let $script=
s{filename: [^,]+,}{filename: ####,};
s{position: [^,]+,}{position: ####,};
s{GTID of last change: [^ ]+}{GTID of last change: #####};
|
--delimiter ;
--let $input_file = $MASTER_STATE_FILE_PATH
--let $skip_column_names= 1
--echo ### Create backup dir
--mkdir $BACKUP_DIR_MASTER
--source include/master-slave.inc
--connection master
--echo ### Check for settings
SELECT @@gtid_mode;
--echo ### Generate some binlog events
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
--echo ### Master backup
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER'
--enable_query_log
--source include/filter_file.inc
--echo ### $MASTER_STATE_FILE content:
--cat_file $MASTER_STATE_FILE_PATH
--echo ### Delete backup dir
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"DDIR"};
rmtree([ "$DDIR" ]);
EOF
--echo ### Create backup dir
--mkdir $BACKUP_DIR_MASTER
--echo ### GTID-mode on
--let $rpl_server_numbers= 1,2
--let $rpl_set_enforce_gtid_consistency= 1
--source include/rpl_set_gtid_mode.inc
--echo ### Check for settings
SELECT @@gtid_mode;
--echo ### Generate some binlog events
CREATE TABLE t1(a INT) ENGINE=TokuDB;
DROP TABLE t1;
--echo ### Master backup
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER'
--enable_query_log
--source include/filter_file.inc
--echo ### $MASTER_STATE_FILE content:
--cat_file $MASTER_STATE_FILE_PATH
--echo ### Delete backup dir
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"DDIR"};
rmtree([ "$DDIR" ]);
EOF
--echo ### GTID-mode off
--let $rpl_gtid_mode= OFF
--let $rpl_set_enforce_gtid_consistency= 0
--let $rpl_server_numbers= 1,2
--source include/rpl_set_gtid_mode.inc
--source include/rpl_end.inc
--source include/have_tokudb_backup.inc
--source include/not_gtid_enabled.inc
--let $SLAVE_INFO_FILE=tokubackup_slave_info
--let $BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave
--let DDIR=$BACKUP_DIR_SLAVE
--mkdir $BACKUP_DIR_SLAVE
--echo Backup
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
--enable_query_log
--list_files $BACKUP_DIR_SLAVE $SLAVE_INFO_FILE
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"DDIR"};
rmtree([ "$DDIR" ]);
EOF
# Check if tokudb hot backup is prevented if innodb_use_native_aio enabled
--source include/have_tokudb_backup.inc
--source include/have_innodb.inc
SELECT @@innodb_use_native_aio;
--let BACKUP_DIR= $MYSQL_TMP_DIR/tokudb_backup
--mkdir $BACKUP_DIR
--replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
--error ER_WRONG_VALUE_FOR_VAR
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR'
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"BACKUP_DIR"};
rmtree([ "$DDIR" ]);
EOF
--master-info-repository=TABLE --relay-log-info-repository=TABLE
!include ../../rpl/my.cnf
[mysqld.1]
[mysqld.2]
[mysqld.3]
master-info-repository=TABLE
relay-log-info-repository=TABLE
[ENV]
SERVER_MYPORT_3= @mysqld.3.port
SERVER_MYSOCK_3= @mysqld.3.socket
--connection server_1
--echo ### Create temp table on master
CREATE TEMPORARY TABLE t1 (a INT);
--let $sync_slave_connection= server_2
--source include/sync_slave_sql_with_master.inc
--echo ### Setup debug_sync points and prepare for slave backup
--connection slave_2
SET SESSION debug="+d,debug_sync_abort_on_timeout";
SHOW STATUS LIKE 'Slave_open_temp_tables';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_safe_slave_entered SIGNAL sse WAIT_FOR sse_continue';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_begin SIGNAL ttlb WAIT_FOR ttlb_continue';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_slave_started SIGNAL ttlss WAIT_FOR ttlss_continue EXECUTE 2';
SET DEBUG_SYNC= 'tokudb_backup_wait_for_temp_tables_loop_end SIGNAL ttle WAIT_FOR ttle_continue';
--mkdir $BACKUP_DIR_SLAVE
--echo ### Turn-on safe-slave option
SET GLOBAL tokudb_backup_safe_slave=ON;
SET GLOBAL tokudb_backup_safe_slave_timeout=30;
--echo ### Start slave backup
--disable_query_log
--send_eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
--enable_query_log
--connection server_2
SET SESSION debug="+d,debug_sync_abort_on_timeout";
--echo ### Wait for safe slave function to start
SET DEBUG_SYNC = "now WAIT_FOR sse";
SHOW STATUS LIKE 'Slave_open_temp_tables';
--echo ### Wait for safe slave loop start
SET DEBUG_SYNC = "now SIGNAL sse_continue WAIT_FOR ttlb";
SHOW STATUS LIKE 'Slave_open_temp_tables';
--echo ### Wait for safe thread loop point just after slave sql thread start 1
SET DEBUG_SYNC = "now SIGNAL ttlb_continue WAIT_FOR ttlss";
SHOW STATUS LIKE 'Slave_open_temp_tables';
--echo ### Wait for safe thread loop end
SET DEBUG_SYNC = "now SIGNAL ttlss_continue WAIT_FOR ttle";
SHOW STATUS LIKE 'Slave_open_temp_tables';
--echo ### Wait for safe thread loop point just after slave sql thread start 2
SET DEBUG_SYNC = "now SIGNAL ttle_continue WAIT_FOR ttlss";
--connection server_1
--echo ### Drop temp table on master
DROP TABLE t1;
--echo ### and syncronize slave
--let $sync_slave_connection= server_2
--source include/sync_slave_sql_with_master.inc
SHOW STATUS LIKE 'Slave_open_temp_tables';
--echo ### Continue backup
SET DEBUG_SYNC = "now SIGNAL ttlss_continue";
--echo ## Reset debug_sync points
SET DEBUG_SYNC = "RESET";
--connection slave_2
--echo ### Wait for backup finish
--reap
--let $input_file = $S_SLAVE_INFO_FILE_PATH
--source include/filter_file.inc
--echo ### Slave $SLAVE_INFO_FILE content:
--cat_file $S_SLAVE_INFO_FILE_PATH
--echo ### Delete slave backup dir
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"BACKUP_DIR_SLAVE"};
rmtree([ "$DDIR" ]);
EOF
--echo ### Turn-off safe-slave option for slave
SET GLOBAL tokudb_backup_safe_slave=default;
SET GLOBAL tokudb_backup_safe_slave_timeout=default;
--connection server_1
--echo ### Turn-on safe-slave option for master
SET GLOBAL tokudb_backup_safe_slave=ON;
SET GLOBAL tokudb_backup_safe_slave_timeout=30;
--echo ### Backup master
--mkdir $BACKUP_DIR_MASTER
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_MASTER'
--enable_query_log
--echo ### Turn-off safe-slave option for master
SET GLOBAL tokudb_backup_safe_slave=default;
SET GLOBAL tokudb_backup_safe_slave_timeout=default;
--let $input_file = $M_MASTER_INFO_FILE_PATH
--source include/filter_file.inc
--echo ### Master $MASTER_INFO_FILE content:
--cat_file $M_MASTER_INFO_FILE_PATH
--echo ### Delete master backup dir
--perl
use File::Path 'rmtree';
$DDIR=$ENV{"BACKUP_DIR_MASTER"};
rmtree([ "$DDIR" ]);
EOF
--source include/have_tokudb_backup.inc
--source include/have_binlog_format_statement.inc
--source include/have_debug_sync.inc
--let $SLAVE_INFO_FILE=tokubackup_slave_info
--let $MASTER_INFO_FILE=tokubackup_binlog_info
--let BACKUP_DIR_SLAVE=$MYSQL_TMP_DIR/tokudb_backup_slave
--let $S_SLAVE_INFO_FILE_PATH=$BACKUP_DIR_SLAVE/$SLAVE_INFO_FILE
--let BACKUP_DIR_MASTER=$MYSQL_TMP_DIR/tokudb_backup_master
--let $M_MASTER_INFO_FILE_PATH=$BACKUP_DIR_MASTER/$MASTER_INFO_FILE
# Settings for include/filter_file.inc
--delimiter |
let $script=
s{filename: [^,]+,}{filename: ####,};
s{position: [^,]+,}{position: ####,};
s{GTID of last change: [^ ]+}{GTID of last change: #####};
s{host: [^,]+,}{host: #.#.#.#,};
s{user: [^,]+,}{user: ####,};
s{port: [^,]+,}{port: ####,};
s{master log file: [^,]+,}{master log file: ####,};
s{relay log file: [^,]+,}{relay log file: ####,};
s{exec master log pos: [^,]+,}{exec master log pos: ####,};
s{executed gtid set: [^,]+, }{executed gtid set: ####, };
s{executed gtid set: [^,]+,[^,]+, }{executed gtid set: ####,####, };
|
--delimiter ;
--let $skip_column_names= 1
--disable_query_log
CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
CALL mtr.add_suppression("Sending passwords in plain text without SSL/TLS is extremely insecure");
--enable_query_log
--echo ###
--echo # Master-slave test
--echo ####
--let $rpl_server_count=3
--let $rpl_topology=1->2
--source include/rpl_init.inc
--connect (slave_2,localhost,root,,test,$SLAVE_MYPORT,$SLAVE_MYSOCK)
--source rpl_safe_slave.inc
--source include/rpl_end.inc
--loose-tokudb-commit-sync=OFF --loose-tokudb-fsync-log-period=1000000
# if --tokudb-commit-sync is off on slave tokudb log must be flushed on backup
# to provide the ability to restore replication after backup restoring
--source include/have_tokudb_backup.inc
--let $BACKUP_DIR_SLAVE= $MYSQL_TMP_DIR/tokudb_backup_slave
--let $BACKUP_MYSQL_DATA_DIR= $BACKUP_DIR_SLAVE/mysql_data_dir
--mkdir $BACKUP_DIR_SLAVE
--source include/master-slave.inc
--echo ### Create some data on master
--connection master
CREATE TABLE t1(a INT, b INT, PRIMARY KEY (a)) ENGINE=TokuDB;
INSERT INTO t1 SET a=100, b=100;
INSERT INTO t1 SET a=200, b=100;
INSERT INTO t1 SET a=300, b=100;
INSERT INTO t1 SET a=400, b=100;
INSERT INTO t1 SET a=500, b=100;
UPDATE t1 SET b = 200 WHERE a = 200;
DELETE FROM t1 WHERE a = 100;
SELECT * FROM t1;
--sync_slave_with_master
--let $SLAVE_DATA_DIR=`SELECT @@DATADIR`
--echo ### Check for slave options
SELECT @@tokudb_commit_sync;
SELECT @@tokudb_fsync_log_period;
--echo ### Check data on slave after sync
SELECT * FROM t1;
--echo ### Do backup on slave
--disable_query_log
--eval SET SESSION tokudb_backup_dir='$BACKUP_DIR_SLAVE'
--enable_query_log
--echo ### Check for errors
SELECT @@session.tokudb_backup_last_error;
SELECT @@session.tokudb_backup_last_error_string;
--echo ### Stop slave server
--connection slave
--let $rpl_server_number= 2
--let $rpl_force_stop= 1
--source include/rpl_stop_server.inc
--echo ### Restore backup
--exec rm -rf $SLAVE_DATA_DIR;
--exec mv $BACKUP_MYSQL_DATA_DIR $SLAVE_DATA_DIR;
--echo ### Start slave server and slave threads
--connection slave
--source include/rpl_start_server.inc
--source include/start_slave.inc
--echo ### Sync slave with master
--connection master
--sync_slave_with_master
--echo ### Check data on slave
SELECT * FROM t1;
--echo ### Cleanup
--connection master
DROP TABLE t1;
--source include/rpl_end.inc
$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
$TOKUDB_OPT $TOKUDB_LOAD_ADD_PATH $TOKUDB_BACKUP_OPT $TOKUDB_BACKUP_LOAD_ADD_PATH --loose-innodb_use_native_aio=off --loose-tokudb-check-jemalloc=0 --loose-tokudb-cache-size=512M --loose-tokudb-block-size=1M
......@@ -14,16 +14,6 @@ INSERT INTO t1 VALUES(1, 1, '1', '1'), (2, 2, '2', '2'), (3, 3, '3', '3'), (4, 4
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
set DEBUG_SYNC = 'now WAIT_FOR hit1';
set DEBUG_SYNC = 'now SIGNAL done1';
c
8
7
6
6
5
set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id ASC;
set DEBUG_SYNC = 'now WAIT_FOR hit2';
......
......@@ -29,24 +29,6 @@ ANALYZE TABLE t1;
# lets flip to another connection
connect(conn1, localhost, root);
# set up the DEBUG_SYNC point
set DEBUG_SYNC = 'tokudb_icp_desc_scan_invalidate SIGNAL hit1 WAIT_FOR done1';
# send the query
send SELECT c FROM t1 WHERE id BETWEEN 5 AND 8 ORDER BY id DESC;
# back to default connection
connection default;
# wait for the ICP reverse scan to invalidate
set DEBUG_SYNC = 'now WAIT_FOR hit1';
# lets release and clean up
set DEBUG_SYNC = 'now SIGNAL done1';
connection conn1;
reap;
# set up the DEBUG_SYNC point again, but for the out of range
set DEBUG_SYNC = 'tokudb_icp_asc_scan_out_of_range SIGNAL hit2 WAIT_FOR done2';
......
......@@ -132,6 +132,7 @@ CREATE TABLE `t2` (
);
LOAD DATA INFILE 'leak172_t1.data' INTO TABLE `t1` fields terminated by ',';
remove_file $MYSQLD_DATADIR/test/leak172_t1.data;
connect(conn1,localhost,root,,);
set session debug_dbug="+d,tokudb_end_bulk_insert_sleep";
......@@ -145,6 +146,7 @@ UPDATE t1, t2 SET t1.`c5` = 4 WHERE t1.`c6` <= 'o';
connection conn1;
reap;
remove_file $MYSQLD_DATADIR/test/leak172_t2.data;
connection default;
disconnect conn1;
......
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
/* -*- mode: C; c-basic-offset: 4 -*- */
#ident "$Id$"
/*======
This file is part of TokuDB
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
TokuDBis is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
TokuDB is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "hatoku_hton.h"
#include "tokudb_dir_cmd.h"
#include "my_dbug.h"
#include "sql_base.h"
#include <vector>
#include <string>
namespace tokudb {
const char tokens_delimiter = ' ';
const char tokens_escape_delimiter_char = '\\';
static int MDL_and_TDC(THD *thd,
const char *db,
const char *table,
const dir_cmd_callbacks &cb) {
int error;
LEX_STRING db_arg;
LEX_STRING table_arg;
db_arg.str = const_cast<char *>(db);
db_arg.length = strlen(db);;
table_arg.str = const_cast<char *>(table);
table_arg.length = strlen(table);
Table_ident table_ident(thd, db_arg, table_arg, true);;
thd->lex->select_lex.add_table_to_list(
thd, &table_ident, NULL, 1, TL_UNLOCK, MDL_EXCLUSIVE, 0, 0, 0);
/* The lock will be released at the end of mysq_execute_command() */
error = lock_table_names(thd,
thd->lex->select_lex.table_list.first,
NULL,
thd->variables.lock_wait_timeout,
0);
if (error) {
if (cb.set_error)
cb.set_error(thd,
error,
"Can't lock table '%s.%s'",
db,
table);
return error;
}
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table, false);
return error;
}
static bool parse_db_and_table(const char *dname,
std::string /*out*/ &db_name,
std::string /*out*/ &table_name) {
const char *begin;
const char *end;
const char *db_name_begin;
const char *db_name_end;
begin = strchr(dname, '/');
if (!begin)
return false;
++begin;
end = strchr(begin, '/');
if (!end)
return false;
db_name_begin = begin;
db_name_end = end;
begin = end + 1;
end = strchr(begin, '-');
if (!end)
return false;
if (strncmp(end, "-main", strlen("-main")) &&
strncmp(end, "-status", strlen("-status")) &&
strncmp(end, "-key", strlen("-key")))
return false;
db_name.assign(db_name_begin, db_name_end);
table_name.assign(begin, end);
return true;
}
static int attach(THD *thd,
const std::string &dname,
const std::string &iname,
const dir_cmd_callbacks &cb) {
int error;
DB_TXN* txn = NULL;
DB_TXN *parent_txn = NULL;
tokudb_trx_data *trx = NULL;
std::string db_name;
std::string table_name;
if (parse_db_and_table(dname.c_str(), db_name, table_name)) {
error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb);
if (error)
goto cleanup;
}
trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
if (trx && trx->sub_sp_level)
parent_txn = trx->sub_sp_level;
error = txn_begin(db_env, parent_txn, &txn, 0, thd);
if (error)
goto cleanup;
error = db_env->dirtool_attach(db_env,
txn,
dname.c_str(),
iname.c_str());
cleanup:
if (txn) {
if (error) {
abort_txn(txn);
}
else {
commit_txn(txn, 0);
}
}
return error;
}
static int detach(THD *thd,
const std::string &dname,
const dir_cmd_callbacks &cb) {
int error;
DB_TXN* txn = NULL;
DB_TXN *parent_txn = NULL;
tokudb_trx_data *trx = NULL;
std::string db_name;
std::string table_name;
if (parse_db_and_table(dname.c_str(), db_name, table_name)) {
error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb);
if (error)
goto cleanup;
}
trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
if (trx && trx->sub_sp_level)
parent_txn = trx->sub_sp_level;
error = txn_begin(db_env, parent_txn, &txn, 0, thd);
if (error)
goto cleanup;
error = db_env->dirtool_detach(db_env,
txn,
dname.c_str());
cleanup:
if (txn) {
if (error) {
abort_txn(txn);
}
else {
commit_txn(txn, 0);
}
}
return error;
}
static int move(THD *thd,
const std::string &old_dname,
const std::string &new_dname,
const dir_cmd_callbacks &cb) {
int error;
DB_TXN* txn = NULL;
DB_TXN *parent_txn = NULL;
tokudb_trx_data *trx = NULL;
std::string db_name;
std::string table_name;
if (parse_db_and_table(old_dname.c_str(), db_name, table_name)) {
error = MDL_and_TDC(thd, db_name.c_str(), table_name.c_str(), cb);
if (error)
goto cleanup;
}
trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
if (trx && trx->sub_sp_level)
parent_txn = trx->sub_sp_level;
error = txn_begin(db_env, parent_txn, &txn, 0, thd);
if (error)
goto cleanup;
error = db_env->dirtool_move(db_env,
txn,
old_dname.c_str(),
new_dname.c_str());
cleanup:
if (txn) {
if (error) {
abort_txn(txn);
}
else {
commit_txn(txn, 0);
}
}
return error;
}
static void tokenize(const char *cmd_str,
std::vector<std::string> /*out*/ &tokens) {
DBUG_ASSERT(cmd_str);
bool was_escape = false;
const char *token_begin = cmd_str;
const char *token_end = token_begin;
while (*token_end) {
if (*token_end == tokens_escape_delimiter_char) {
was_escape = true;
}
else if (*token_end == tokens_delimiter) {
if (was_escape)
was_escape = false;
else {
if (token_begin == token_end)
++token_begin;
else {
tokens.push_back(std::string(token_begin, token_end));
token_begin = token_end + 1;
}
}
}
else {
was_escape = false;
}
++token_end;
}
if (token_begin != token_end)
tokens.push_back(std::string(token_begin, token_end));
}
void process_dir_cmd(THD *thd,
const char *cmd_str,
const dir_cmd_callbacks &cb) {
DBUG_ASSERT(thd);
DBUG_ASSERT(cmd_str);
std::vector<std::string> tokens;
tokenize(cmd_str, tokens);
if (tokens.empty())
return;
const std::string &cmd = tokens[0];
if (!cmd.compare("attach")) {
if (tokens.size() != 3) {
if (cb.set_error)
cb.set_error(thd,
EINVAL,
"attach command requires two arguments");
}
else {
int r = attach(thd, tokens[1], tokens[2], cb);
if (r && cb.set_error)
cb.set_error(thd, r, "Attach command error");
}
}
else if (!cmd.compare("detach")) {
if (tokens.size() != 2) {
if (cb.set_error)
cb.set_error(thd,
EINVAL,
"detach command requires one argument");
}
else {
int r = detach(thd, tokens[1], cb);
if (r && cb.set_error)
cb.set_error(thd, r, "detach command error");
}
}
else if (!cmd.compare("move")) {
if (tokens.size() != 3) {
if (cb.set_error)
cb.set_error(thd,
EINVAL,
"move command requires two arguments");
}
else {
int r = move(thd, tokens[1], tokens[2], cb);
if (r && cb.set_error)
cb.set_error(thd, r, "move command error");
}
}
else {
if (cb.set_error)
cb.set_error(thd,
ENOENT,
"Unknown command '%s'",
cmd.c_str());
}
return;
};
} // namespace tokudb
/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of TokuDB
Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
TokuDBis is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2,
as published by the Free Software Foundation.
TokuDB is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TokuDB. If not, see <http://www.gnu.org/licenses/>.
======= */
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#ifndef _TOKUDB_DIR_CMD_H
#define _TOKUDB_DIR_CMD_H
#include <sql_class.h>
namespace tokudb {
struct dir_cmd_callbacks {
void (*set_error)(THD *thd,
int error,
const char *error_fmt,
...);
};
void process_dir_cmd(THD *thd,
const char *cmd_str,
const dir_cmd_callbacks &cb);
};
#endif // _TOKUDB_DIR_CMD_H
......@@ -25,6 +25,9 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
#include "hatoku_hton.h"
#include "sql_acl.h"
#include "tokudb_dir_cmd.h"
#include "sql_parse.h"
namespace tokudb {
namespace sysvars {
......@@ -40,6 +43,8 @@ namespace sysvars {
#define TOKUDB_VERSION_STR NULL
#endif
const size_t error_buffer_max_size = 1024;
ulonglong cache_size = 0;
uint cachetable_pool_threads = 0;
int cardinality_scale_percent = 0;
......@@ -918,7 +923,72 @@ static MYSQL_THDVAR_BOOL(
true);
#endif
static int dir_cmd_check(THD* thd, struct st_mysql_sys_var* var,
void* save, struct st_mysql_value* value) ;
static MYSQL_THDVAR_INT(dir_cmd_last_error,
PLUGIN_VAR_THDLOCAL,
"error from the last dir command. 0 is success",
NULL, NULL, 0, 0, 0, 1);
static MYSQL_THDVAR_STR(dir_cmd_last_error_string,
PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC,
"error string from the last dir command",
NULL, NULL, NULL);
static MYSQL_THDVAR_STR(dir_cmd,
PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC,
"name of the directory where the backup is stored",
dir_cmd_check, NULL, NULL);
static void dir_cmd_set_error(THD *thd,
int error,
const char *error_fmt,
...) {
char buff[error_buffer_max_size];
va_list varargs;
assert(thd);
assert(error_fmt);
va_start(varargs, error_fmt);
vsnprintf(buff, sizeof(buff), error_fmt, varargs);
va_end(varargs);
#if 0 // Disable macros unavailable in MariaDB.
THDVAR_SET(thd, dir_cmd_last_error, &error);
THDVAR_SET(thd, dir_cmd_last_error_string, buff);
#endif
}
static int dir_cmd_check(THD* thd, struct st_mysql_sys_var* var,
void* save, struct st_mysql_value* value) {
int error = 0;
dir_cmd_set_error(thd, error, "");
if (check_global_access(thd, SUPER_ACL)) {
return 1;
}
char buff[STRING_BUFFER_USUAL_SIZE];
int length = sizeof(buff);
const char *str = value->val_str(value, buff, &length);
if (str) {
str = thd->strmake(str, length);
*(const char**)save = str;
}
if (str) {
dir_cmd_callbacks callbacks { .set_error = dir_cmd_set_error };
process_dir_cmd(thd, str, callbacks);
error = THDVAR(thd, dir_cmd_last_error);
} else {
error = EINVAL;
}
return error;
}
//******************************************************************************
// all system variables
......@@ -949,7 +1019,6 @@ st_mysql_sys_var* system_variables[] = {
MYSQL_SYSVAR(version),
MYSQL_SYSVAR(write_status_frequency),
MYSQL_SYSVAR(dir_per_db),
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
MYSQL_SYSVAR(gdb_path),
MYSQL_SYSVAR(gdb_on_fatal),
......@@ -1008,6 +1077,9 @@ st_mysql_sys_var* system_variables[] = {
#if TOKUDB_DEBUG
MYSQL_SYSVAR(debug_pause_background_job_manager),
#endif // TOKUDB_DEBUG
MYSQL_SYSVAR(dir_cmd_last_error),
MYSQL_SYSVAR(dir_cmd_last_error_string),
MYSQL_SYSVAR(dir_cmd),
NULL
};
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment