Commit 454e974c authored by Rich Prohaska

#225 hot optimize for 5.6 and 10.0 using alter recreate

parent 676c38a8
@@ -6299,7 +6299,7 @@ uint32_t ha_tokudb::get_cursor_isolation_flags(enum thr_lock_type lock_type, THD
     lock (if we don't want to use MySQL table locks at all) or add locks
     for many tables (like we do when we are using a MERGE handler).
 
-  Tokudb DB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which
+  TokuDB changes all WRITE locks to TL_WRITE_ALLOW_WRITE (which
   signals that we are doing WRITES, but we are still allowing other
   reader's and writer's.
@@ -6321,31 +6321,22 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l
     }
     if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
-        // if creating a hot index
-        if (thd_sql_command(thd)== SQLCOM_CREATE_INDEX && get_create_index_online(thd)) {
-            rw_rdlock(&share->num_DBs_lock);
-            if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) {
-                lock_type = TL_WRITE_ALLOW_WRITE;
-            }
-            lock.type = lock_type;
-            rw_unlock(&share->num_DBs_lock);
-        }
-        // 5.5 supports reads concurrent with alter table. just use the default lock type.
-#if MYSQL_VERSION_ID < 50500
-        else if (thd_sql_command(thd)== SQLCOM_CREATE_INDEX ||
-                 thd_sql_command(thd)== SQLCOM_ALTER_TABLE ||
-                 thd_sql_command(thd)== SQLCOM_DROP_INDEX) {
-            // force alter table to lock out other readers
-            lock_type = TL_WRITE;
-            lock.type = lock_type;
-        }
-#endif
-        else {
-            // If we are not doing a LOCK TABLE, then allow multiple writers
-            if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
-                !thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) {
-                lock_type = TL_WRITE_ALLOW_WRITE;
-            }
-            lock.type = lock_type;
-        }
+        enum_sql_command sql_command = (enum_sql_command) thd_sql_command(thd);
+        if (!thd->in_lock_tables) {
+            if (sql_command == SQLCOM_CREATE_INDEX && get_create_index_online(thd)) {
+                // hot indexing
+                rw_rdlock(&share->num_DBs_lock);
+                if (share->num_DBs == (table->s->keys + tokudb_test(hidden_primary_key))) {
+                    lock_type = TL_WRITE_ALLOW_WRITE;
+                }
+                rw_unlock(&share->num_DBs_lock);
+            } else if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
+                       sql_command != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) {
+                // allow concurrent writes
+                lock_type = TL_WRITE_ALLOW_WRITE;
+            } else if (sql_command == SQLCOM_OPTIMIZE && lock_type == TL_READ_NO_INSERT) {
+                // hot optimize table
+                lock_type = TL_READ;
+            }
+        }
+        lock.type = lock_type;
     }
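The rewritten store_lock() branch boils down to a small decision table for the effective lock type when the statement is not LOCK TABLES. The standalone sketch below restates that table; the enum values, the all_keys_open flag (standing in for the share->num_DBs check) and effective_lock() itself are simplified stand-ins for the MySQL/TokuDB definitions, not the engine's API.

    // Illustrative only: models the lock-type mapping store_lock() now applies when
    // the statement is not LOCK TABLES. Enum values and ordering are simplified.
    #include <cassert>

    enum thr_lock_type_t { TL_READ, TL_READ_NO_INSERT, TL_WRITE_CONCURRENT_INSERT,
                           TL_WRITE, TL_WRITE_ALLOW_WRITE };
    enum sql_command_t { CMD_CREATE_INDEX, CMD_OPTIMIZE, CMD_TRUNCATE, CMD_OTHER };

    static thr_lock_type_t effective_lock(sql_command_t cmd, thr_lock_type_t requested,
                                          bool online_index, bool all_keys_open) {
        if (cmd == CMD_CREATE_INDEX && online_index && all_keys_open)
            return TL_WRITE_ALLOW_WRITE;   // hot indexing: writers keep running
        if (requested >= TL_WRITE_CONCURRENT_INSERT && requested <= TL_WRITE &&
            cmd != CMD_TRUNCATE)
            return TL_WRITE_ALLOW_WRITE;   // ordinary writes: allow concurrent writers
        if (cmd == CMD_OPTIMIZE && requested == TL_READ_NO_INSERT)
            return TL_READ;                // hot OPTIMIZE: downgrade so inserts proceed
        return requested;                  // anything else keeps the requested lock
    }

    int main() {
        // OPTIMIZE no longer shuts out concurrent inserts on the table being flattened.
        assert(effective_lock(CMD_OPTIMIZE, TL_READ_NO_INSERT, false, true) == TL_READ);
        assert(effective_lock(CMD_OTHER, TL_WRITE, false, true) == TL_WRITE_ALLOW_WRITE);
        return 0;
    }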
...
@@ -109,15 +109,6 @@ typedef struct loader_context {
     ha_tokudb* ha;
 } *LOADER_CONTEXT;
 
-typedef struct hot_optimize_context {
-    THD *thd;
-    char* write_status_msg;
-    ha_tokudb *ha;
-    uint progress_stage;
-    uint current_table;
-    uint num_tables;
-} *HOT_OPTIMIZE_CONTEXT;
-
 //
 // This object stores table information that is to be shared
 // among all ha_tokudb objects.
@@ -805,6 +796,7 @@ class ha_tokudb : public handler {
     void remove_from_trx_handler_list();
 
 private:
+    int do_optimize(THD *thd);
     int map_to_handler_error(int error);
 };
...
@@ -130,6 +130,12 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
     TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
     uint64_t rec_per_key[table_share->key_parts];
     int result = HA_ADMIN_OK;
+
+    // stub out analyze if optimize is remapped to alter recreate + analyze
+    if (thd_sql_command(thd) != SQLCOM_ANALYZE) {
+        TOKUDB_HANDLER_DBUG_RETURN(result);
+    }
+
     DB_TXN *txn = transaction;
     if (!txn) {
         result = HA_ADMIN_FAILED;
@@ -171,6 +177,15 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
     TOKUDB_HANDLER_DBUG_RETURN(result);
 }
 
+typedef struct hot_optimize_context {
+    THD *thd;
+    char* write_status_msg;
+    ha_tokudb *ha;
+    uint progress_stage;
+    uint current_table;
+    uint num_tables;
+} *HOT_OPTIMIZE_CONTEXT;
+
 static int hot_poll_fun(void *extra, float progress) {
     HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra;
     if (context->thd->killed) {
@@ -193,10 +208,11 @@ static int hot_poll_fun(void *extra, float progress) {
     return 0;
 }
 
+volatile int ha_tokudb_optimize_wait;
+
 // flatten all DB's in this table, to do so, peform hot optimize on each db
-int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
+int ha_tokudb::do_optimize(THD *thd) {
     TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
     int error;
     uint curr_num_DBs = table->s->keys + tokudb_test(hidden_primary_key);
@@ -206,9 +222,7 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
     thd_progress_init(thd, curr_num_DBs);
 #endif
 
-    //
     // for each DB, run optimize and hot_optimize
-    //
     for (uint i = 0; i < curr_num_DBs; i++) {
         DB* db = share->key_file[i];
         error = db->optimize(db);
@@ -228,14 +242,24 @@ int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
             goto cleanup;
         }
     }
     error = 0;
 
-cleanup:
+
+cleanup:
+    while (ha_tokudb_optimize_wait) sleep(1);
 #ifdef HA_TOKUDB_HAS_THD_PROGRESS
     thd_progress_end(thd);
 #endif
+    TOKUDB_HANDLER_DBUG_RETURN(error);
+}
+
+int ha_tokudb::optimize(THD *thd, HA_CHECK_OPT *check_opt) {
+    TOKUDB_HANDLER_DBUG_ENTER("%s", share->table_name);
+    int error;
+#if TOKU_OPTIMIZE_WITH_RECREATE
+    error = HA_ADMIN_TRY_ALTER;
+#else
+    error = do_optimize(thd);
+#endif
     TOKUDB_HANDLER_DBUG_RETURN(error);
 }
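Taken together with the guard added to analyze() above, this restructuring works as follows: the per-dictionary hot-optimize loop now lives in do_optimize(), and optimize() itself, when TOKU_OPTIMIZE_WITH_RECREATE is set (5.6 and 10.0, per the hatoku_defines.h hunks below), returns HA_ADMIN_TRY_ALTER so the server re-runs OPTIMIZE TABLE as an ALTER TABLE recreate plus analyze; the alter path in the next file detects that recreate and calls do_optimize(). The toy program below is only a sketch of that round trip; toy_handler, hot_optimize_all_dictionaries() and the rest are made-up stand-ins, not the server or handler API.

    // Illustrative only: a toy model of the OPTIMIZE -> "try alter" -> recreate ->
    // do_optimize() round trip. All names here are invented for the sketch.
    #include <cstdio>

    enum admin_result { ADMIN_OK, ADMIN_TRY_ALTER };

    struct toy_handler {
        bool optimize_with_recreate;          // stands in for TOKU_OPTIMIZE_WITH_RECREATE

        int hot_optimize_all_dictionaries() { // stands in for ha_tokudb::do_optimize()
            std::puts("hot optimize: flatten every DB of the table");
            return 0;
        }
        admin_result optimize() {             // stands in for ha_tokudb::optimize()
            if (optimize_with_recreate)
                return ADMIN_TRY_ALTER;       // ask the server to redo this as an alter recreate
            hot_optimize_all_dictionaries();
            return ADMIN_OK;
        }
        int inplace_alter(bool optimize_needed) { // stands in for the alter path below
            return optimize_needed ? hot_optimize_all_dictionaries() : 0;
        }
    };

    int main() {
        toy_handler h{true};
        if (h.optimize() == ADMIN_TRY_ALTER)  // simplified stand-in for the server fallback
            h.inplace_alter(/*optimize_needed=*/true);
        return 0;
    }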
...
@@ -122,6 +122,7 @@ class tokudb_alter_ctx : public inplace_alter_handler_ctx {
         expand_varchar_update_needed(false),
         expand_fixed_update_needed(false),
         expand_blob_update_needed(false),
+        optimize_needed(false),
         table_kc_info(NULL),
         altered_table_kc_info(NULL) {
     }
@@ -141,6 +142,7 @@ class tokudb_alter_ctx : public inplace_alter_handler_ctx {
     bool expand_varchar_update_needed;
     bool expand_fixed_update_needed;
     bool expand_blob_update_needed;
+    bool optimize_needed;
     Dynamic_array<uint> changed_fields;
     KEY_AND_COL_INFO *table_kc_info;
     KEY_AND_COL_INFO *altered_table_kc_info;
@@ -439,7 +441,13 @@ enum_alter_inplace_result ha_tokudb::check_if_supported_inplace_alter(TABLE *alt
                 result = HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
             }
         }
     }
+#if TOKU_OPTIMIZE_WITH_RECREATE
+    else if (only_flags(ctx->handler_flags, Alter_inplace_info::RECREATE_TABLE + Alter_inplace_info::ALTER_COLUMN_DEFAULT)) {
+        ctx->optimize_needed = true;
+        result = HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE;
+    }
+#endif
 
     if (result != HA_ALTER_INPLACE_NOT_SUPPORTED && table->s->null_bytes != altered_table->s->null_bytes &&
         (tokudb_debug & TOKUDB_DEBUG_ALTER_TABLE)) {
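In this hunk, an ALTER whose handler flags amount to nothing more than a table recreate (plus, possibly, ALTER_COLUMN_DEFAULT) is treated as a hot optimize request: ctx->optimize_needed is set and HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE is returned so the rebuild does not hold a lock. The only_flags() test is a subset check over the flag bitmask; below is a standalone sketch of that check with made-up bit values (the real constants live in Alter_inplace_info), not the engine's helper verbatim.

    // Illustrative only: the subset test that an only_flags()-style helper performs.
    // Bit values are invented; the real ones come from Alter_inplace_info.
    #include <cassert>
    #include <cstdint>

    static bool only_flags(uint64_t flags, uint64_t allowed) {
        // true when at least one flag is set and nothing outside `allowed` is set
        return flags != 0 && (flags & ~allowed) == 0;
    }

    int main() {
        const uint64_t RECREATE_TABLE       = 1u << 0;
        const uint64_t ALTER_COLUMN_DEFAULT = 1u << 1;
        const uint64_t ADD_INDEX            = 1u << 2;

        // A pure recreate (what OPTIMIZE maps to) qualifies for the hot-optimize path.
        assert(only_flags(RECREATE_TABLE, RECREATE_TABLE | ALTER_COLUMN_DEFAULT));
        // Anything extra, e.g. adding an index, falls through to the other alter handling.
        assert(!only_flags(RECREATE_TABLE | ADD_INDEX, RECREATE_TABLE | ALTER_COLUMN_DEFAULT));
        return 0;
    }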
@@ -522,6 +530,9 @@ bool ha_tokudb::inplace_alter_table(TABLE *altered_table, Alter_inplace_info *ha
     if (error == 0 && ctx->reset_card) {
         error = tokudb::set_card_from_status(share->status_block, ctx->alter_txn, table->s, altered_table->s);
     }
+    if (error == 0 && ctx->optimize_needed) {
+        error = do_optimize(ha_thd());
+    }
 
 #if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
     (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
...
@@ -112,6 +112,7 @@ PATENT RIGHTS GRANT:
 #define TOKU_INCLUDE_EXTENDED_KEYS 1
 #endif
 #define TOKU_INCLUDE_OPTION_STRUCTS 1
+#define TOKU_OPTIMIZE_WITH_RECREATE 1
 
 #elif 50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799
 // mysql 5.7 with no patches
@@ -134,17 +135,18 @@ PATENT RIGHTS GRANT:
 #define TOKU_PARTITION_WRITE_FRM_DATA 0
 #else
 // mysql 5.6 with tokutek patches
-#define TOKU_USE_DB_TYPE_TOKUDB 1 /* has DB_TYPE_TOKUDB patch */
+#define TOKU_USE_DB_TYPE_TOKUDB 1 // has DB_TYPE_TOKUDB patch
 #define TOKU_INCLUDE_ALTER_56 1
-#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 /* has tokudb row format compression patch */
+#define TOKU_INCLUDE_ROW_TYPE_COMPRESSION 1 // has tokudb row format compression patch
-#define TOKU_INCLUDE_XA 1 /* has patch that fixes TC_LOG_MMAP code */
+#define TOKU_INCLUDE_XA 1 // has patch that fixes TC_LOG_MMAP code
 #define TOKU_PARTITION_WRITE_FRM_DATA 0
 #define TOKU_INCLUDE_WRITE_FRM_DATA 0
-#define TOKU_INCLUDE_UPSERT 1 /* has tokudb upsert patch */
+#define TOKU_INCLUDE_UPSERT 1 // has tokudb upsert patch
 #if defined(HTON_SUPPORTS_EXTENDED_KEYS)
 #define TOKU_INCLUDE_EXTENDED_KEYS 1
 #endif
 #endif
+#define TOKU_OPTIMIZE_WITH_RECREATE 1
 
 #elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
 // mysql 5.5 and mariadb 5.5
...