Commit ed839052 authored by Sergei Golubchik

Merge tag 'tokudb-7.5.5' into bb-5.5-merge

parents 8e80f91f d8493f40
This diff was suppressed by a .gitattributes entry.
@@ -172,6 +172,31 @@ static inline uint32_t get_len_of_offsets(KEY_AND_COL_INFO* kc_info, TABLE_SHARE
}
static int get_thread_query_string(my_thread_id id, String &qs) {
mysql_mutex_lock(&LOCK_thread_count);
I_List_iterator<THD> it(threads);
THD* tmp;
while ((tmp= it++))
{
/* ID */
if (tmp->thread_id == id)
{
/* Lock THD mutex that protects its data when looking at it. */
mysql_mutex_lock(&tmp->LOCK_thd_data);
/* INFO */
if (tmp->query())
{
qs = String(tmp->query(), tmp->query_length(), system_charset_info);
}
mysql_mutex_unlock(&tmp->LOCK_thd_data);
break;
}
}
mysql_mutex_unlock(&LOCK_thread_count);
return 0;
}
static int allocate_key_and_col_info ( TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info) {
int error;
//
@@ -3557,8 +3582,12 @@ static void maybe_do_unique_checks_delay(THD *thd) {
}
}
static bool need_read_only(THD *thd) {
return opt_readonly || !THDVAR(thd, rpl_check_readonly);
}
static bool do_unique_checks(THD *thd, bool do_rpl_event) {
- if (do_rpl_event && thd->slave_thread && opt_readonly && !THDVAR(thd, rpl_unique_checks))
+ if (do_rpl_event && thd->slave_thread && need_read_only(thd) && !THDVAR(thd, rpl_unique_checks))
return false;
else
return !thd_test_options(thd, OPTION_RELAXED_UNIQUE_CHECKS);
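The double negative in need_read_only() above is easy to misread: with tokudb_rpl_check_readonly=ON (its default, declared later in this diff) the relaxed replication path still requires the server to actually run with --read-only, while setting it to OFF makes the check pass regardless of --read-only, which is the combination the new rpl_tokudb_read_only_* tests exercise. A minimal standalone sketch of that decision, with plain bools standing in for opt_readonly and THDVAR(thd, rpl_check_readonly), not the engine's actual code:

#include <cstdio>

// Stand-ins for opt_readonly and THDVAR(thd, rpl_check_readonly) (sketch only).
static bool need_read_only_sketch(bool opt_readonly, bool rpl_check_readonly) {
    // The slave qualifies for the relaxed path when it really is read only,
    // or when rpl_check_readonly is OFF and the read-only check is skipped.
    return opt_readonly || !rpl_check_readonly;
}

int main() {
    for (int ro = 0; ro <= 1; ro++)
        for (int chk = 0; chk <= 1; chk++)
            printf("read_only=%d rpl_check_readonly=%d -> eligible=%d\n",
                   ro, chk, (int) need_read_only_sketch(ro, chk));
    return 0;
}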
@@ -5378,9 +5407,12 @@ int ha_tokudb::get_next(uchar* buf, int direction, DBT* key_to_compare, bool do_
}
if (!error) {
- tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(ha_thd(), tokudb_hton);
+ THD *thd = ha_thd();
tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
trx->stmt_progress.queried++;
- track_progress(ha_thd());
+ track_progress(thd);
if (thd_killed(thd))
error = ER_ABORTING_CONNECTION;
}
cleanup:
return error;
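The thd_killed() check added to get_next() follows the usual handler pattern: poll the kill flag on the per-row path and turn it into an error so a long scan stops shortly after KILL instead of running to completion. A rough self-contained sketch of that pattern (an atomic bool stands in for thd_killed(), and the error code is an arbitrary stand-in, not MySQL's ER_ABORTING_CONNECTION value):

#include <atomic>
#include <cstdio>

static std::atomic<bool> killed{false};              // stand-in for thd_killed(thd)
static const int SKETCH_ABORTED = 1;                 // stand-in error code

// Pretend data source: returns 0 while rows remain, non-zero at end of data.
static int fetch_next_row(long row) { return row < 1000000 ? 0 : -1; }

static int scan(long *rows_read) {
    int error = 0;
    for (long row = 0;; row++) {
        error = fetch_next_row(row);
        if (error) break;                            // end of data or storage error
        ++*rows_read;
        if (killed.load()) {                         // checked after every fetched row
            error = SKETCH_ABORTED;
            break;
        }
    }
    return error;
}

int main() {
    long rows = 0;
    int error = scan(&rows);
    printf("rows=%ld error=%d\n", rows, error);
    return 0;
}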
@@ -7253,7 +7285,7 @@ double ha_tokudb::index_only_read_time(uint keynr, double records) {
// HA_POS_ERROR - Something is wrong with the index tree
//
ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range* end_key) {
- TOKUDB_HANDLER_DBUG_ENTER("");
+ TOKUDB_HANDLER_DBUG_ENTER("%d %p %p", keynr, start_key, end_key);
DBT *pleft_key, *pright_key;
DBT left_key, right_key;
ha_rows ret_val = HA_TOKUDB_RANGE_COUNT;
@@ -7309,6 +7341,9 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range*
ret_val = (ha_rows) (rows <= 1 ? 1 : rows);
cleanup:
if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
TOKUDB_HANDLER_TRACE("%" PRIu64 " %" PRIu64, (uint64_t) ret_val, rows);
}
DBUG_RETURN(ret_val);
}
...
@@ -89,6 +89,8 @@ PATENT RIGHTS GRANT:
#ident "Copyright (c) 2007-2013 Tokutek Inc. All rights reserved."
#ident "The technology is licensed by the Massachusetts Institute of Technology, Rutgers State University of New Jersey, and the Research Foundation of State University of New York at Stony Brook under United States of America Serial No. 11/760379 and to the patents and/or patent applications resulting from it."
#include "toku_time.h"
struct analyze_progress_extra {
THD *thd;
TOKUDB_SHARE *share;
@@ -186,9 +188,12 @@ typedef struct hot_optimize_context {
uint progress_stage;
uint current_table;
uint num_tables;
float progress_limit;
uint64_t progress_last_time;
uint64_t throttle;
} *HOT_OPTIMIZE_CONTEXT;
- static int hot_poll_fun(void *extra, float progress) {
+ static int hot_optimize_progress_fun(void *extra, float progress) {
HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra;
if (thd_killed(context->thd)) {
sprintf(context->write_status_msg, "The process has been killed, aborting hot optimize.");
@@ -207,14 +212,27 @@ static int hot_poll_fun(void *extra, float progress) {
// the percentage we report here is for the current stage/db
thd_progress_report(context->thd, (unsigned long long) percentage, 100);
#endif
- return 0;
// throttle the optimize table
if (context->throttle) {
uint64_t time_now = toku_current_time_microsec();
uint64_t dt = time_now - context->progress_last_time;
uint64_t throttle_time = 1000000ULL / context->throttle;
if (throttle_time > dt) {
usleep(throttle_time - dt);
}
context->progress_last_time = toku_current_time_microsec();
}
// return 1 if progress has reached the progress limit
return progress >= context->progress_limit;
}
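The throttle block above paces the hot-optimize callback: hc.throttle comes from the new tokudb_optimize_throttle session variable and means "at most this many callbacks per second", so each callback gets a budget of 1000000/throttle microseconds and sleeps for whatever is left of that budget when it arrives early. A small self-contained sketch of the same arithmetic, using std::chrono and std::this_thread in place of toku_current_time_microsec() and usleep():

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <thread>

static uint64_t now_usec() {
    using namespace std::chrono;
    return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

// Keep successive calls at least 1000000/throttle microseconds apart
// (throttle = maximum number of calls per second; 0 disables pacing).
static void pace(uint64_t throttle, uint64_t *last_usec) {
    if (throttle == 0) return;
    uint64_t dt = now_usec() - *last_usec;           // elapsed since previous call
    uint64_t budget = 1000000ULL / throttle;         // per-call time budget
    if (budget > dt)
        std::this_thread::sleep_for(std::chrono::microseconds(budget - dt));
    *last_usec = now_usec();
}

int main() {
    uint64_t start = now_usec(), last = start;
    for (int i = 0; i < 5; i++)
        pace(10 /* at most 10 calls per second */, &last);
    printf("5 paced calls took ~%llu usec\n", (unsigned long long)(now_usec() - start));
    return 0;
}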
// flatten all DB's in this table, to do so, perform hot optimize on each db
int ha_tokudb::do_optimize(THD *thd) {
TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
int error = 0;
const char *orig_proc_info = tokudb_thd_get_proc_info(thd);
- int error;
uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
#ifdef HA_TOKUDB_HAS_THD_PROGRESS
@@ -225,11 +243,21 @@ int ha_tokudb::do_optimize(THD *thd) {
// for each DB, run optimize and hot_optimize
for (uint i = 0; i < curr_num_DBs; i++) {
// only optimize the index if it matches the optimize_index_name session variable
const char *optimize_index_name = THDVAR(thd, optimize_index_name);
if (optimize_index_name) {
const char *this_index_name = i >= table_share->keys ? "primary" : table_share->key_info[i].name;
if (strcasecmp(optimize_index_name, this_index_name) != 0) {
continue;
}
}
DB* db = share->key_file[i];
error = db->optimize(db);
if (error) {
goto cleanup;
}
struct hot_optimize_context hc;
memset(&hc, 0, sizeof hc);
hc.thd = thd;
@@ -237,8 +265,11 @@ int ha_tokudb::do_optimize(THD *thd) {
hc.ha = this;
hc.current_table = i;
hc.num_tables = curr_num_DBs;
hc.progress_limit = THDVAR(thd, optimize_index_fraction);
hc.progress_last_time = toku_current_time_microsec();
hc.throttle = THDVAR(thd, optimize_throttle);
uint64_t loops_run;
- error = db->hot_optimize(db, NULL, NULL, hot_poll_fun, &hc, &loops_run);
+ error = db->hot_optimize(db, NULL, NULL, hot_optimize_progress_fun, &hc, &loops_run);
if (error) {
goto cleanup;
}
...
@@ -162,6 +162,9 @@ PATENT RIGHTS GRANT:
#define TOKU_INCLUDE_EXTENDED_KEYS 1
#define TOKU_INCLUDE_OPTION_STRUCTS 1
#define TOKU_CLUSTERING_IS_COVERING 1
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#else
#define TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING 1
#endif
#define TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL 0 /* MariaDB 5.5 */
...
@@ -1403,8 +1403,6 @@ static MYSQL_SYSVAR_STR(data_dir, tokudb_data_dir, PLUGIN_VAR_READONLY, "TokuDB
static MYSQL_SYSVAR_STR(version, tokudb_version, PLUGIN_VAR_READONLY, "TokuDB Version", NULL, NULL, NULL);
- static MYSQL_SYSVAR_UINT(init_flags, tokudb_init_flags, PLUGIN_VAR_READONLY, "Sets TokuDB DB_ENV->open flags", NULL, NULL, tokudb_init_flags, 0, ~0U, 0);
static MYSQL_SYSVAR_UINT(write_status_frequency, tokudb_write_status_frequency, 0, "TokuDB frequency that show processlist updates status of writes", NULL, NULL, 1000, 0, ~0U, 0);
static MYSQL_SYSVAR_UINT(read_status_frequency, tokudb_read_status_frequency, 0, "TokuDB frequency that show processlist updates status of reads", NULL, NULL, 10000, 0, ~0U, 0);
static MYSQL_SYSVAR_INT(fs_reserve_percent, tokudb_fs_reserve_percent, PLUGIN_VAR_READONLY, "TokuDB file system space reserve (percent free required)", NULL, NULL, 5, 0, 100, 0);
@@ -1441,7 +1439,6 @@ static struct st_mysql_sys_var *tokudb_system_variables[] = {
MYSQL_SYSVAR(create_index_online),
MYSQL_SYSVAR(disable_prefetching),
MYSQL_SYSVAR(version),
- MYSQL_SYSVAR(init_flags),
MYSQL_SYSVAR(checkpointing_period),
MYSQL_SYSVAR(prelock_empty),
MYSQL_SYSVAR(checkpoint_lock),
@@ -1482,6 +1479,10 @@ static struct st_mysql_sys_var *tokudb_system_variables[] = {
MYSQL_SYSVAR(rpl_unique_checks_delay),
MYSQL_SYSVAR(rpl_lookup_rows),
MYSQL_SYSVAR(rpl_lookup_rows_delay),
MYSQL_SYSVAR(rpl_check_readonly),
MYSQL_SYSVAR(optimize_index_name),
MYSQL_SYSVAR(optimize_index_fraction),
MYSQL_SYSVAR(optimize_throttle),
NULL
};
@@ -1974,6 +1975,33 @@ static int tokudb_fractal_tree_block_map_done(void *p) {
return 0;
}
#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
struct tokudb_search_txn_extra {
bool match_found;
uint64_t match_txn_id;
uint64_t match_client_id;
};
static int tokudb_search_txn_callback(uint64_t txn_id, uint64_t client_id, iterate_row_locks_callback iterate_locks, void *locks_extra, void *extra) {
struct tokudb_search_txn_extra *e = reinterpret_cast<struct tokudb_search_txn_extra *>(extra);
if (e->match_txn_id == txn_id) {
e->match_found = true;
e->match_client_id = client_id;
return 1;
}
return 0;
}
static bool tokudb_txn_id_to_client_id(THD *thd, uint64_t blocking_txnid, uint64_t *blocking_client_id) {
struct tokudb_search_txn_extra e = { false, blocking_txnid, 0};
(void) db_env->iterate_live_transactions(db_env, tokudb_search_txn_callback, &e);
if (e.match_found) {
*blocking_client_id = e.match_client_id;
}
return e.match_found;
}
#endif
static void tokudb_pretty_key(const DB *db, const DBT *key, const char *default_key, String *out) {
if (key->data == NULL) {
out->append(default_key);
@@ -2023,8 +2051,9 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons
// generate a JSON document with the lock timeout info
String log_str;
log_str.append("{");
uint64_t mysql_thread_id = thd->thread_id;
log_str.append("\"mysql_thread_id\":"); log_str.append("\"mysql_thread_id\":");
log_str.append_ulonglong(thd->thread_id); log_str.append_ulonglong(mysql_thread_id);
log_str.append(", \"dbname\":"); log_str.append(", \"dbname\":");
log_str.append("\""); log_str.append(tokudb_get_index_name(db)); log_str.append("\""); log_str.append("\""); log_str.append(tokudb_get_index_name(db)); log_str.append("\"");
log_str.append(", \"requesting_txnid\":"); log_str.append(", \"requesting_txnid\":");
...@@ -2064,7 +2093,18 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons ...@@ -2064,7 +2093,18 @@ static void tokudb_lock_timeout_callback(DB *db, uint64_t requesting_txnid, cons
} }
// dump to stderr // dump to stderr
if (lock_timeout_debug & 2) { if (lock_timeout_debug & 2) {
sql_print_error("%s: %s", tokudb_hton_name, log_str.c_ptr()); sql_print_error("%s: lock timeout %s", tokudb_hton_name, log_str.c_ptr());
LEX_STRING *qs = thd_query_string(thd);
sql_print_error("%s: requesting_thread_id:%" PRIu64 " q:%.*s", tokudb_hton_name, mysql_thread_id, (int) qs->length, qs->str);
#if TOKU_INCLUDE_LOCK_TIMEOUT_QUERY_STRING
uint64_t blocking_thread_id = 0;
if (tokudb_txn_id_to_client_id(thd, blocking_txnid, &blocking_thread_id)) {
String blocking_qs;
if (get_thread_query_string(blocking_thread_id, blocking_qs) == 0) {
sql_print_error("%s: blocking_thread_id:%" PRIu64 " q:%.*s", tokudb_hton_name, blocking_thread_id, blocking_qs.length(), blocking_qs.c_ptr());
}
}
#endif
}
}
}
...
@@ -491,6 +491,15 @@ static MYSQL_THDVAR_BOOL(rpl_lookup_rows, PLUGIN_VAR_THDLOCAL, "lookup a row on
static MYSQL_THDVAR_ULONGLONG(rpl_lookup_rows_delay, PLUGIN_VAR_THDLOCAL, "time in milliseconds to add to lookups on replication slave",
NULL, NULL, 0 /*default*/, 0 /*min*/, ~0ULL /*max*/, 1 /*blocksize*/);
static MYSQL_THDVAR_BOOL(rpl_check_readonly, PLUGIN_VAR_THDLOCAL, "check if the slave is read only",
NULL /*check*/, NULL /*update*/, true /*default*/);
static MYSQL_THDVAR_STR(optimize_index_name, PLUGIN_VAR_THDLOCAL + PLUGIN_VAR_MEMALLOC, "optimize index name (default all indexes)", NULL /*check*/, NULL /*update*/, NULL /*default*/);
static MYSQL_THDVAR_DOUBLE(optimize_index_fraction, 0, "optimize index fraction (default 1.0 all)", NULL /*check*/, NULL /*update*/, 1.0 /*def*/, 0 /*min*/, 1.0 /*max*/, 1);
static MYSQL_THDVAR_ULONGLONG(optimize_throttle, 0, "optimize throttle (default no throttle)", NULL /*check*/, NULL /*update*/, 0 /*def*/, 0 /*min*/, ~0ULL /*max*/, 1);
extern HASH tokudb_open_tables;
extern pthread_mutex_t tokudb_mutex;
extern uint32_t tokudb_write_status_frequency;
...
@@ -10,3 +10,6 @@ rpl_tokudb_write_pk: unreliable, uses timestamp differences
rpl_tokudb_write_pk_uc1: unreliable, uses timestamp differences
rpl_tokudb_write_unique: unreliable, uses timestamp differences
rpl_tokudb_write_unique_uc1: unreliable, uses timestamp differences
rpl_tokudb_read_only_ff: unreliable, uses timestamp differences
rpl_tokudb_read_only_tf: unreliable, uses timestamp differences
rpl_tokudb_read_only_tt: unreliable, uses timestamp differences
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
0
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
include/master-slave.inc
[connection master]
drop table if exists t;
create table t (a bigint not null, primary key(a)) engine=tokudb;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
select unix_timestamp()-@tstart <= 10;
unix_timestamp()-@tstart <= 10
1
include/diff_tables.inc [master:test.t, slave:test.t]
drop table if exists t;
include/rpl_end.inc
--read-only=OFF --tokudb-rpl-check-readonly=OFF --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
# test replicated write rows log events on a table with a primary key.
# the slave is read only with tokudb unique checks disabled.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# insert into t values (5); # test read-only
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=OFF --tokudb-rpl-check-readonly=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
# test replicated write rows log events on a table with a primary key.
# the slave is read only with tokudb unique checks disabled.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# insert into t values (5); # test read-only
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-check-readonly=OFF --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
# test replicated write rows log events on a table with a primary key.
# the slave is read only with tokudb unique checks disabled.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# insert into t values (5); # test read-only
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
--read-only=ON --tokudb-rpl-check-readonly=ON --tokudb-rpl-unique-checks-delay=5000 --tokudb-rpl-unique-checks=OFF
# test replicated write rows log events on a table with a primary key.
# the slave is read only with tokudb unique checks disabled.
source include/have_tokudb.inc;
let $engine=tokudb;
source include/have_binlog_format_row.inc;
source include/master-slave.inc;
# initialize
connection master;
disable_warnings;
drop table if exists t;
enable_warnings;
connection slave;
# show variables like 'read_only';
# show variables like 'tokudb_rpl_unique_checks%';
# insert some rows
connection master;
# select @@binlog_format;
# select @@autocommit;
eval create table t (a bigint not null, primary key(a)) engine=$engine;
# show create table t;
select unix_timestamp() into @tstart;
insert into t values (1);
insert into t values (2),(3);
insert into t values (4);
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
connection master;
select unix_timestamp()-@tstart <= 10;
connection slave;
# insert into t values (5); # test read-only
# show create table t;
# diff tables
connection master;
--let $diff_tables= master:test.t, slave:test.t
source include/diff_tables.inc;
# cleanup
connection master;
drop table if exists t;
sync_slave_with_master;
# source include/sync_slave_sql_with_master.inc;
source include/rpl_end.inc;
set default_storage_engine='tokudb';
drop table if exists t;
create table t (a int, b int, c int, primary key(a), key(b), key(c));
set tokudb_optimize_index_name='primary';
optimize table t;
Table Op Msg_type Msg_text
test.t optimize status OK
set tokudb_optimize_index_name='b';
optimize table t;
Table Op Msg_type Msg_text
test.t optimize status OK
set tokudb_optimize_index_name='c';
optimize table t;
Table Op Msg_type Msg_text
test.t optimize status OK
drop table t;
set default_storage_engine=tokudb;
drop table if exists t;
create table t (id int not null primary key, c int not null) engine=tokudb;
insert into t values (1,0);
begin;
update t set c=10 where id=1;
update t set c=100;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
rollback;
drop table t;
create table t (id int not null primary key, c int not null) engine=tokudb partition by hash(id) partitions 1;
insert into t values (1,0);
begin;
update t set c=10 where id=1;
update t set c=100;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
rollback;
drop table t;
SET DEFAULT_STORAGE_ENGINE = 'tokudb';
DROP TABLE IF EXISTS t1;
create table t1 (i int, j int, primary key (i))engine=TokuDB;
insert into t1 values (0,0) MEMCACHE_DIRTY 'a';
insert into t1 values (1,0) MEMCACHE_DIRTY 'b', 'c';
update t1 set j=j+1 where i=0 MEMCACHE_DIRTY 'a';
update t1 set j=j+1 where i=1 MEMCACHE_DIRTY 'b', 'c';
insert into t1 values (0,0) on duplicate key update j=j+1 MEMCACHE_DIRTY 'a';
insert into t1 values (2,0) on duplicate key update j=j+1 MEMCACHE_DIRTY 'a', 'b';
replace into t1 values (0,3) MEMCACHE_DIRTY 'a';
replace into t1 values (3,3) MEMCACHE_DIRTY 'a', 'b';
delete from t1 where i=0 MEMCACHE_DIRTY 'a';
delete from t1 where i=1 MEMCACHE_DIRTY 'b', 'c';
DROP TABLE t1;
# test tokudb_optimize_index_name session variable
set default_storage_engine='tokudb';
source include/have_tokudb.inc;
disable_warnings;
drop table if exists t;
enable_warnings;
create table t (a int, b int, c int, primary key(a), key(b), key(c));
# optimize primary key
set tokudb_optimize_index_name='primary';
optimize table t;
# optimize key b
set tokudb_optimize_index_name='b';
optimize table t;
# optimize key c
set tokudb_optimize_index_name='c';
optimize table t;
drop table t;
# test for the DB-801 bug on mysql-5.5.41
source include/have_tokudb.inc;
source include/have_partition.inc;
set default_storage_engine=tokudb;
disable_warnings;
drop table if exists t;
enable_warnings;
# run the test on a tokudb table
create table t (id int not null primary key, c int not null) engine=tokudb;
insert into t values (1,0);
connect(conn1,localhost,root,,);
connection default;
begin;
update t set c=10 where id=1;
connection conn1;
--error ER_LOCK_WAIT_TIMEOUT
update t set c=100;
connection default;
rollback;
disconnect conn1;
drop table t;
# run the test on a partitioned tokudb table
create table t (id int not null primary key, c int not null) engine=tokudb partition by hash(id) partitions 1;
insert into t values (1,0);
connect(conn1,localhost,root,,);
connection default;
begin;
update t set c=10 where id=1;
connection conn1;
--error ER_LOCK_WAIT_TIMEOUT
update t set c=100;
connection default;
rollback;
disconnect conn1;
drop table t;
--source include/have_tokudb.inc
#
# Record inconsistency.
#
#
SET DEFAULT_STORAGE_ENGINE = 'tokudb';
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
create table t1 (i int, j int, primary key (i))engine=TokuDB;
insert into t1 values (0,0) MEMCACHE_DIRTY 'a'; insert into t1 values (1,0) MEMCACHE_DIRTY 'b', 'c';
update t1 set j=j+1 where i=0 MEMCACHE_DIRTY 'a'; update t1 set j=j+1 where i=1 MEMCACHE_DIRTY 'b', 'c';
insert into t1 values (0,0) on duplicate key update j=j+1 MEMCACHE_DIRTY 'a'; insert into t1 values (2,0) on duplicate key update j=j+1 MEMCACHE_DIRTY 'a', 'b';
replace into t1 values (0,3) MEMCACHE_DIRTY 'a'; replace into t1 values (3,3) MEMCACHE_DIRTY 'a', 'b';
delete from t1 where i=0 MEMCACHE_DIRTY 'a'; delete from t1 where i=1 MEMCACHE_DIRTY 'b', 'c';
# Final cleanup.
DROP TABLE t1;
@@ -131,11 +131,20 @@ function parse_mysqlbuild() {
tokudb_version=${BASH_REMATCH[6]}
target_system=${BASH_REMATCH[7]}
target_arch=${BASH_REMATCH[8]}
# verify targets
if [ $target_system != $system ] ; then exitcode=1; fi
if [ $target_arch != $arch ] ; then exitcode=1; fi
# split the version string into major.minor.patch
if [[ $mysql_version =~ ^([0-9]+)\.([0-9]+)\.([0-9]+.*) ]] ; then
mysql_version_major=${BASH_REMATCH[1]}
mysql_version_minor=${BASH_REMATCH[2]}
mysql_version_patch=${BASH_REMATCH[3]}
fi
local temp_tokudb_version=$tokudb_version
# decode enterprise
if [[ $temp_tokudb_version =~ (.*)-e$ ]] ; then
build_type=enterprise
@@ -143,6 +152,7 @@ function parse_mysqlbuild() {
else
build_type=community
fi
# decode debug
if [[ $temp_tokudb_version =~ (.*)-debug$ ]] ; then
build_debug=1
@@ -151,8 +161,9 @@ function parse_mysqlbuild() {
else
build_debug=0
fi
# set tag or HEAD
- if [[ $temp_tokudb_version =~ ^([0-9]+)\\.([0-9]+)\\.([0-9]+) ]] ; then
+ if [[ $temp_tokudb_version =~ ^([0-9]+)\.([0-9]+)\.([0-9]+) ]] ; then
git_tag=tokudb-$temp_tokudb_version
else
git_tag=HEAD
@@ -160,6 +171,8 @@ function parse_mysqlbuild() {
if [ -z $mysql_tree ] ; then mysql_tree=$mysql_distro-$mysql_version; fi
if [ -z $jemalloc_tree ] ; then jemalloc_tree=$jemalloc_version; fi
fi
# set repository
mysql_repo=$mysql_distro
if [[ $mysql_version =~ ^([0-9]+\.[0-9]+) ]] ; then mysql_repo=$mysql_distro-${BASH_REMATCH[1]}; else exitcode=1; fi
else
@@ -174,6 +187,15 @@ function parse_mysql() {
if [[ $mysql =~ ^(mysql|mariadb)-(.*)$ ]] ; then
mysql_distro=${BASH_REMATCH[1]}
mysql_version=${BASH_REMATCH[2]}
# split the version string into major.minor.patch
if [[ $mysql_version =~ ^([0-9]+)\.([0-9]+)\.([0-9]+.*) ]] ; then
mysql_version_major=${BASH_REMATCH[1]}
mysql_version_minor=${BASH_REMATCH[2]}
mysql_version_patch=${BASH_REMATCH[3]}
fi
# set repository
mysql_repo=$mysql_distro
if [[ $mysql_version =~ ^([0-9]+\.[0-9]+) ]] ; then mysql_repo=$mysql_distro-${BASH_REMATCH[1]}; else exitcode=1; fi
exitcode=0
...
@@ -103,10 +103,22 @@ fi
cd $mysql_distro-$mysql_version
if [ $? != 0 ] ; then exit 1; fi
# extract mysql version patch number only
if [[ $mysql_version_patch =~ ^([0-9]+) ]] ; then p=${BASH_REMATCH[1]}; else p=$mysql_version_patch; fi
# install the backup source
- if [ ! -d toku_backup ] ; then
+ tokudb_backup=
if [ $mysql_version_major -eq 5 -a $mysql_version_minor -eq 5 -a $p -le 40 ] ; then
tokudb_backup=patch
github_download Tokutek/backup-$build_type $(git_tree $git_tag $backup_tree) backup-$build_type
cp -r backup-$build_type/backup toku_backup
elif [ $build_type = enterprise ] ; then
tokudb_backup=plugin
github_download Tokutek/tokudb-backup-plugin $(git_tree $git_tag $backup_tree) tokudb-backup-plugin
mv tokudb-backup-plugin plugin
github_download Tokutek/backup-enterprise $(git_tree $git_tag $backup_tree) backup-enterprise
mv backup-enterprise/backup plugin/tokudb-backup-plugin
rm -rf backup-enterprise
fi
if [ ! -d tokudb-engine ] ; then
@@ -153,6 +165,7 @@ function generate_cmake_cmd () {
echo -n CC=$cc CXX=$cxx cmake \
-D BUILD_CONFIG=mysql_release \
-D MYSQL_MAINTAINER_MODE=OFF \
-D CMAKE_BUILD_TYPE=$cmake_build_type \
-D CMAKE_TOKUDB_REVISION=$ft_revision \
-D TOKUDB_VERSION=tokudb-${tokudb_version} \
...
@@ -55,7 +55,7 @@ function github_clone() {
git_tag=
mysql=mysql-5.5
- mysql_tree=mysql-5.5.35
+ mysql_tree=mysql-5.5.41
jemalloc=jemalloc
jemalloc_tree=3.6.0
tokudbengine=tokudb-engine
@@ -148,13 +148,15 @@ fi
pushd $build_dir
if [ $? != 0 ] ; then exit 1; fi
extra_cmake_options="-DCMAKE_LINK_DEPENDS_NO_SHARED=ON"
extra_cmake_options+=" -DBUILD_TESTING=OFF"
extra_cmake_options+=" -DMYSQL_MAINTAINER_MODE=OFF"
if (( $cmake_valgrind )) ; then
extra_cmake_options+=" -DUSE_VALGRIND=ON"
fi
if (( $cmake_debug_paranoid )) ; then
extra_cmake_options+=" -DTOKU_DEBUG_PARANOID=ON"
fi
- CC=$cc CXX=$cxx cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=$install_dir -DBUILD_TESTING=OFF $extra_cmake_options ../$mysql_tree
+ CC=$cc CXX=$cxx cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=$install_dir $extra_cmake_options ../$mysql_tree
if [ $? != 0 ] ; then exit 1; fi
make -j4 install
if [ $? != 0 ] ; then exit 1; fi
...