Commit 65d01da2 authored by Sergei Petrunia's avatar Sergei Petrunia

Copy of

commit ba00e640f658ad8d0a4dff09a497a51b8a4de935
Author: Herman Lee <herman@fb.com>
Date:   Wed Feb 22 06:30:06 2017 -0800

    Improve add_index_alter_cardinality test

    Summary:
    Split add_index_inplace_cardinality test out and add a debug_sync point
    to it so that the flush of the memtable occurs while the alter is
    running.
    Closes https://github.com/facebook/mysql-5.6/pull/539

    Reviewed By: alxyang

    Differential Revision: D4597887

    Pulled By: hermanlee

    fbshipit-source-id: faedda2
parent 7468ccfa
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: false
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
- Regex: '^(<|"(gtest|isl|json)/)'
Priority: 3
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '$'
IndentCaseLabels: false
IndentWidth: 2
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
JavaScriptQuotes: Leave
...
......@@ -28,6 +28,11 @@ INCLUDE_DIRECTORIES(
ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX
-DZLIB)
CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE)
IF(HAVE_FALLOCATE)
ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT)
ENDIF()
SET(ROCKSDB_SOURCES
ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h
logger.h
......@@ -41,6 +46,7 @@ SET(ROCKSDB_SOURCES
rdb_index_merge.cc rdb_index_merge.h
rdb_perf_context.cc rdb_perf_context.h
rdb_mutex_wrapper.cc rdb_mutex_wrapper.h
rdb_psi.h rdb_psi.cc
rdb_sst_info.cc rdb_sst_info.h
rdb_utils.cc rdb_utils.h rdb_buff.h
rdb_threads.cc rdb_threads.h
......
......@@ -59,6 +59,7 @@
#include "./rdb_i_s.h"
#include "./rdb_index_merge.h"
#include "./rdb_mutex_wrapper.h"
#include "./rdb_psi.h"
#include "./rdb_threads.h"
// Internal MySQL APIs not exposed in any header.
......@@ -302,7 +303,7 @@ static void rocksdb_set_pause_background_work(
my_core::THD *const thd MY_ATTRIBUTE((__unused__)),
struct st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)),
void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) {
mysql_mutex_lock(&rdb_sysvars_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex);
const bool pause_requested = *static_cast<const bool *>(save);
if (rocksdb_pause_background_work != pause_requested) {
if (pause_requested) {
......@@ -312,7 +313,7 @@ static void rocksdb_set_pause_background_work(
}
rocksdb_pause_background_work = pause_requested;
}
mysql_mutex_unlock(&rdb_sysvars_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex);
}
static void rocksdb_set_compaction_options(THD *thd,
......@@ -329,6 +330,10 @@ static void rocksdb_set_rate_limiter_bytes_per_sec(THD *thd,
void *var_ptr,
const void *save);
static void rocksdb_set_delayed_write_rate(THD *thd,
struct st_mysql_sys_var *var,
void *var_ptr, const void *save);
static void rdb_set_collation_exception_list(const char *exception_list);
static void rocksdb_set_collation_exception_list(THD *thd,
struct st_mysql_sys_var *var,
......@@ -350,14 +355,16 @@ static long long rocksdb_block_cache_size;
/* Use unsigned long long instead of uint64_t because of MySQL compatibility */
static unsigned long long // NOLINT(runtime/int)
rocksdb_rate_limiter_bytes_per_sec;
static unsigned long long rocksdb_delayed_write_rate;
static unsigned long // NOLINT(runtime/int)
rocksdb_persistent_cache_size;
rocksdb_persistent_cache_size_mb;
static uint64_t rocksdb_info_log_level;
static char *rocksdb_wal_dir;
static char *rocksdb_persistent_cache_path;
static uint64_t rocksdb_index_type;
static char rocksdb_background_sync;
static uint32_t rocksdb_debug_optimizer_n_rows;
static my_bool rocksdb_force_compute_memtable_stats;
static my_bool rocksdb_debug_optimizer_no_zero_cardinality;
static uint32_t rocksdb_wal_recovery_mode;
static uint32_t rocksdb_access_hint_on_compaction_start;
......@@ -413,11 +420,11 @@ static void rocksdb_set_rocksdb_info_log_level(
const void *const save) {
DBUG_ASSERT(save != nullptr);
mysql_mutex_lock(&rdb_sysvars_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex);
rocksdb_info_log_level = *static_cast<const uint64_t *>(save);
rocksdb_db_options.info_log->SetInfoLogLevel(
static_cast<const rocksdb::InfoLogLevel>(rocksdb_info_log_level));
mysql_mutex_unlock(&rdb_sysvars_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex);
}
static const char *index_type_names[] = {"kBinarySearch", "kHashSearch", NullS};
......@@ -478,6 +485,12 @@ static MYSQL_THDVAR_BOOL(
"update and delete",
nullptr, nullptr, FALSE);
static MYSQL_THDVAR_BOOL(
blind_delete_primary_key, PLUGIN_VAR_RQCMDARG,
"Deleting rows by primary key lookup, without reading rows (Blind Deletes)."
" Blind delete is disabled if the table has secondary key",
nullptr, nullptr, FALSE);
static MYSQL_THDVAR_STR(
read_free_rpl_tables, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
"List of tables that will use read-free replication on the slave "
......@@ -561,6 +574,13 @@ static MYSQL_SYSVAR_ULONGLONG(
nullptr, rocksdb_set_rate_limiter_bytes_per_sec, /* default */ 0L,
/* min */ 0L, /* max */ MAX_RATE_LIMITER_BYTES_PER_SEC, 0);
static MYSQL_SYSVAR_ULONGLONG(delayed_write_rate, rocksdb_delayed_write_rate,
PLUGIN_VAR_RQCMDARG,
"DBOptions::delayed_write_rate", nullptr,
rocksdb_set_delayed_write_rate,
rocksdb_db_options.delayed_write_rate, 0,
UINT64_MAX, 0);
static MYSQL_SYSVAR_ENUM(
info_log_level, rocksdb_info_log_level, PLUGIN_VAR_RQCMDARG,
"Filter level for info logs to be written mysqld error log. "
......@@ -579,8 +599,9 @@ static MYSQL_THDVAR_INT(
static MYSQL_SYSVAR_UINT(
wal_recovery_mode, rocksdb_wal_recovery_mode, PLUGIN_VAR_RQCMDARG,
"DBOptions::wal_recovery_mode for RocksDB", nullptr, nullptr,
/* default */ (uint)rocksdb::WALRecoveryMode::kPointInTimeRecovery,
"DBOptions::wal_recovery_mode for RocksDB. Default is kAbsoluteConsistency",
nullptr, nullptr,
/* default */ (uint)rocksdb::WALRecoveryMode::kAbsoluteConsistency,
/* min */ (uint)rocksdb::WALRecoveryMode::kTolerateCorruptedTailRecords,
/* max */ (uint)rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords, 0);
......@@ -637,13 +658,6 @@ static MYSQL_SYSVAR_ULONG(max_total_wal_size,
nullptr, rocksdb_db_options.max_total_wal_size,
/* min */ 0L, /* max */ LONG_MAX, 0);
static MYSQL_SYSVAR_BOOL(
disabledatasync,
*reinterpret_cast<my_bool *>(&rocksdb_db_options.disableDataSync),
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"DBOptions::disableDataSync for RocksDB", nullptr, nullptr,
rocksdb_db_options.disableDataSync);
static MYSQL_SYSVAR_BOOL(
use_fsync, *reinterpret_cast<my_bool *>(&rocksdb_db_options.use_fsync),
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
......@@ -662,10 +676,10 @@ static MYSQL_SYSVAR_STR(
nullptr, "");
static MYSQL_SYSVAR_ULONG(
persistent_cache_size, rocksdb_persistent_cache_size,
persistent_cache_size_mb, rocksdb_persistent_cache_size_mb,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Size of cache for BlockBasedTableOptions::persistent_cache for RocksDB",
nullptr, nullptr, rocksdb_persistent_cache_size,
"Size of cache in MB for BlockBasedTableOptions::persistent_cache "
"for RocksDB", nullptr, nullptr, rocksdb_persistent_cache_size_mb,
/* min */ 0L, /* max */ ULONG_MAX, 0);
static MYSQL_SYSVAR_ULONG(
......@@ -946,9 +960,11 @@ static MYSQL_SYSVAR_BOOL(background_sync, rocksdb_background_sync,
"turns on background syncs for RocksDB", nullptr,
nullptr, FALSE);
static MYSQL_THDVAR_BOOL(write_sync, PLUGIN_VAR_RQCMDARG,
"WriteOptions::sync for RocksDB", nullptr, nullptr,
rocksdb::WriteOptions().sync);
static MYSQL_THDVAR_UINT(flush_log_at_trx_commit, PLUGIN_VAR_RQCMDARG,
"Sync on transaction commit. Similar to "
"innodb_flush_log_at_trx_commit. 1: sync on commit, "
"0,2: not sync on commit",
nullptr, nullptr, 1, 0, 2, 0);
static MYSQL_THDVAR_BOOL(write_disable_wal, PLUGIN_VAR_RQCMDARG,
"WriteOptions::disableWAL for RocksDB", nullptr,
......@@ -986,6 +1002,12 @@ static MYSQL_SYSVAR_UINT(
"Test only to override rocksdb estimates of table size in a memtable",
nullptr, nullptr, 0, /* min */ 0, /* max */ INT_MAX, 0);
static MYSQL_SYSVAR_BOOL(force_compute_memtable_stats,
rocksdb_force_compute_memtable_stats,
PLUGIN_VAR_RQCMDARG,
"Force to always compute memtable stats",
nullptr, nullptr, TRUE);
static MYSQL_SYSVAR_BOOL(
debug_optimizer_no_zero_cardinality,
rocksdb_debug_optimizer_no_zero_cardinality, PLUGIN_VAR_RQCMDARG,
......@@ -1085,6 +1107,7 @@ static MYSQL_SYSVAR_BOOL(
"Counting SingleDelete as rocksdb_compaction_sequential_deletes", nullptr,
nullptr, rocksdb_compaction_sequential_deletes_count_sd);
static MYSQL_SYSVAR_BOOL(
print_snapshot_conflict_queries, rocksdb_print_snapshot_conflict_queries,
PLUGIN_VAR_RQCMDARG,
......@@ -1104,6 +1127,11 @@ static MYSQL_THDVAR_BOOL(verify_row_debug_checksums, PLUGIN_VAR_RQCMDARG,
"Verify checksums when reading index/table records",
nullptr, nullptr, false /* default value */);
static MYSQL_THDVAR_BOOL(master_skip_tx_api, PLUGIN_VAR_RQCMDARG,
"Skipping holding any lock on row access. "
"Not effective on slave.",
nullptr, nullptr, false);
static MYSQL_SYSVAR_UINT(
validate_tables, rocksdb_validate_tables,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
......@@ -1154,6 +1182,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = {
MYSQL_SYSVAR(skip_unique_check_tables),
MYSQL_SYSVAR(trace_sst_api),
MYSQL_SYSVAR(commit_in_the_middle),
MYSQL_SYSVAR(blind_delete_primary_key),
MYSQL_SYSVAR(read_free_rpl_tables),
MYSQL_SYSVAR(bulk_load_size),
MYSQL_SYSVAR(merge_buf_size),
......@@ -1167,14 +1196,14 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = {
MYSQL_SYSVAR(error_if_exists),
MYSQL_SYSVAR(paranoid_checks),
MYSQL_SYSVAR(rate_limiter_bytes_per_sec),
MYSQL_SYSVAR(delayed_write_rate),
MYSQL_SYSVAR(info_log_level),
MYSQL_SYSVAR(max_open_files),
MYSQL_SYSVAR(max_total_wal_size),
MYSQL_SYSVAR(disabledatasync),
MYSQL_SYSVAR(use_fsync),
MYSQL_SYSVAR(wal_dir),
MYSQL_SYSVAR(persistent_cache_path),
MYSQL_SYSVAR(persistent_cache_size),
MYSQL_SYSVAR(persistent_cache_size_mb),
MYSQL_SYSVAR(delete_obsolete_files_period_micros),
MYSQL_SYSVAR(base_background_compactions),
MYSQL_SYSVAR(max_background_compactions),
......@@ -1224,7 +1253,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = {
MYSQL_SYSVAR(background_sync),
MYSQL_SYSVAR(write_sync),
MYSQL_SYSVAR(flush_log_at_trx_commit),
MYSQL_SYSVAR(write_disable_wal),
MYSQL_SYSVAR(write_ignore_missing_column_families),
......@@ -1234,6 +1263,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = {
MYSQL_SYSVAR(records_in_range),
MYSQL_SYSVAR(force_index_records_in_range),
MYSQL_SYSVAR(debug_optimizer_n_rows),
MYSQL_SYSVAR(force_compute_memtable_stats),
MYSQL_SYSVAR(debug_optimizer_no_zero_cardinality),
MYSQL_SYSVAR(compact_cf),
......@@ -1259,6 +1289,7 @@ static struct st_mysql_sys_var *rocksdb_system_variables[] = {
MYSQL_SYSVAR(checksums_pct),
MYSQL_SYSVAR(store_row_debug_checksums),
MYSQL_SYSVAR(verify_row_debug_checksums),
MYSQL_SYSVAR(master_skip_tx_api),
MYSQL_SYSVAR(validate_tables),
MYSQL_SYSVAR(table_stats_sampling_pct),
......@@ -1268,7 +1299,7 @@ static rocksdb::WriteOptions
rdb_get_rocksdb_write_options(my_core::THD *const thd) {
rocksdb::WriteOptions opt;
opt.sync = THDVAR(thd, write_sync);
opt.sync = THDVAR(thd, flush_log_at_trx_commit) == 1;
opt.disableWAL = THDVAR(thd, write_disable_wal);
opt.ignore_missing_column_families =
THDVAR(thd, write_ignore_missing_column_families);
......@@ -1291,85 +1322,6 @@ Rdb_open_tables_map::get_hash_key(Rdb_table_handler *const table_handler,
return reinterpret_cast<uchar *>(table_handler->m_table_name);
}
/*
The following is needed as an argument for mysql_stage_register,
irrespectively of whether we're compiling with P_S or not.
*/
PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock", 0};
#ifdef HAVE_PSI_INTERFACE
static PSI_thread_key rdb_background_psi_thread_key;
static PSI_thread_key rdb_drop_idx_psi_thread_key;
static PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock};
static my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key,
rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key,
rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key,
key_mutex_tx_list, rdb_sysvars_psi_mutex_key;
static PSI_mutex_info all_rocksdb_mutexes[] = {
{&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL},
{&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL},
{&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL},
{&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL},
{&rdb_mem_cmp_space_mutex_key, "collation space char data init",
PSI_FLAG_GLOBAL},
{&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL},
{&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL},
};
static PSI_rwlock_key key_rwlock_collation_exception_list;
static PSI_rwlock_key key_rwlock_read_free_rpl_tables;
static PSI_rwlock_key key_rwlock_skip_unique_check_tables;
static PSI_rwlock_info all_rocksdb_rwlocks[] = {
{&key_rwlock_collation_exception_list, "collation_exception_list",
PSI_FLAG_GLOBAL},
{&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL},
{&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables",
PSI_FLAG_GLOBAL},
};
PSI_cond_key rdb_signal_bg_psi_cond_key, rdb_signal_drop_idx_psi_cond_key;
static PSI_cond_info all_rocksdb_conds[] = {
{&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL},
{&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index",
PSI_FLAG_GLOBAL},
};
static PSI_thread_info all_rocksdb_threads[] = {
{&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL},
{&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL},
};
static void init_rocksdb_psi_keys() {
const char *const category = "rocksdb";
int count;
if (PSI_server == nullptr)
return;
count = array_elements(all_rocksdb_mutexes);
PSI_server->register_mutex(category, all_rocksdb_mutexes, count);
count = array_elements(all_rocksdb_rwlocks);
PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count);
count = array_elements(all_rocksdb_conds);
// TODO Disabling PFS for conditions due to the bug
// https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92
// PSI_server->register_cond(category, all_rocksdb_conds, count);
count = array_elements(all_rocksdb_stages);
mysql_stage_register(category, all_rocksdb_stages, count);
count = array_elements(all_rocksdb_threads);
mysql_thread_register(category, all_rocksdb_threads, count);
}
#endif
/*
Drop index thread's control
*/
......@@ -1503,10 +1455,12 @@ class Rdb_transaction {
static void walk_tx_list(Rdb_tx_list_walker *walker) {
DBUG_ASSERT(walker != nullptr);
mysql_mutex_lock(&s_tx_list_mutex);
RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex);
for (auto it : s_tx_list)
walker->process_tran(it);
mysql_mutex_unlock(&s_tx_list_mutex);
RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex);
}
int set_status_error(THD *const thd, const rocksdb::Status &s,
......@@ -1850,15 +1804,15 @@ class Rdb_transaction {
explicit Rdb_transaction(THD *const thd)
: m_thd(thd), m_tbl_io_perf(nullptr) {
mysql_mutex_lock(&s_tx_list_mutex);
RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex);
s_tx_list.insert(this);
mysql_mutex_unlock(&s_tx_list_mutex);
RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex);
}
virtual ~Rdb_transaction() {
mysql_mutex_lock(&s_tx_list_mutex);
RDB_MUTEX_LOCK_CHECK(s_tx_list_mutex);
s_tx_list.erase(this);
mysql_mutex_unlock(&s_tx_list_mutex);
RDB_MUTEX_UNLOCK_CHECK(s_tx_list_mutex);
}
};
......@@ -2075,7 +2029,7 @@ class Rdb_transaction_impl : public Rdb_transaction {
tx_opts.lock_timeout = rdb_convert_sec_to_ms(m_timeout_sec);
tx_opts.deadlock_detect = THDVAR(m_thd, deadlock_detect);
write_opts.sync = THDVAR(m_thd, write_sync);
write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1;
write_opts.disableWAL = THDVAR(m_thd, write_disable_wal);
write_opts.ignore_missing_column_families =
THDVAR(m_thd, write_ignore_missing_column_families);
......@@ -2288,7 +2242,7 @@ class Rdb_writebatch_impl : public Rdb_transaction {
void start_tx() override {
reset();
write_opts.sync = THDVAR(m_thd, write_sync);
write_opts.sync = THDVAR(m_thd, flush_log_at_trx_commit) == 1;
write_opts.disableWAL = THDVAR(m_thd, write_disable_wal);
write_opts.ignore_missing_column_families =
THDVAR(m_thd, write_ignore_missing_column_families);
......@@ -2367,10 +2321,15 @@ static Rdb_transaction *get_or_create_tx(THD *const thd) {
Rdb_transaction *&tx = get_tx_from_thd(thd);
// TODO: this is called too many times.. O(#rows)
if (tx == nullptr) {
if (rpl_skip_tx_api && thd->rli_slave)
if ((rpl_skip_tx_api && thd->rli_slave) ||
(THDVAR(thd, master_skip_tx_api) && !thd->rli_slave))
{
tx = new Rdb_writebatch_impl(thd);
}
else
{
tx = new Rdb_transaction_impl(thd);
}
tx->set_params(THDVAR(thd, lock_wait_timeout), THDVAR(thd, max_row_locks));
tx->start_tx();
} else {
......@@ -2474,7 +2433,7 @@ static int rocksdb_prepare(handlerton *const hton, THD *const thd,
return HA_EXIT_FAILURE;
}
if (thd->durability_property == HA_IGNORE_DURABILITY &&
THDVAR(thd, write_sync)) {
(THDVAR(thd, flush_log_at_trx_commit) == 1)) {
/**
we set the log sequence as '1' just to trigger hton->flush_logs
*/
......@@ -3232,9 +3191,7 @@ static int rocksdb_init_func(void *const p) {
// Validate the assumption about the size of ROCKSDB_SIZEOF_HIDDEN_PK_COLUMN.
static_assert(sizeof(longlong) == 8, "Assuming that longlong is 8 bytes.");
#ifdef HAVE_PSI_INTERFACE
init_rocksdb_psi_keys();
#endif
rocksdb_hton = (handlerton *)p;
mysql_mutex_init(rdb_psi_open_tbls_mutex_key, &rdb_open_tables.m_mutex,
......@@ -3298,6 +3255,8 @@ static int rocksdb_init_func(void *const p) {
rocksdb_db_options.rate_limiter = rocksdb_rate_limiter;
}
rocksdb_db_options.delayed_write_rate = rocksdb_delayed_write_rate;
std::shared_ptr<Rdb_logger> myrocks_logger = std::make_shared<Rdb_logger>();
rocksdb::Status s = rocksdb::CreateLoggerFromOptions(
rocksdb_datadir, rocksdb_db_options, &rocksdb_db_options.info_log);
......@@ -3383,24 +3342,25 @@ static int rocksdb_init_func(void *const p) {
rocksdb_set_compaction_options(nullptr, nullptr, nullptr, nullptr);
mysql_mutex_lock(&rdb_sysvars_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex);
DBUG_ASSERT(rocksdb_table_stats_sampling_pct <=
RDB_TBL_STATS_SAMPLE_PCT_MAX);
properties_collector_factory->SetTableStatsSamplingPct(
rocksdb_table_stats_sampling_pct);
mysql_mutex_unlock(&rdb_sysvars_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex);
}
if (rocksdb_persistent_cache_size > 0) {
if (rocksdb_persistent_cache_size_mb > 0) {
std::shared_ptr<rocksdb::PersistentCache> pcache;
uint64_t cache_size_bytes= rocksdb_persistent_cache_size_mb * 1024 * 1024;
rocksdb::NewPersistentCache(
rocksdb::Env::Default(), std::string(rocksdb_persistent_cache_path),
rocksdb_persistent_cache_size, myrocks_logger, true, &pcache);
cache_size_bytes, myrocks_logger, true, &pcache);
rocksdb_tbl_options.persistent_cache = pcache;
} else if (strlen(rocksdb_persistent_cache_path)) {
sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size");
sql_print_error("RocksDB: Must specify rocksdb_persistent_cache_size_mb");
DBUG_RETURN(1);
}
......@@ -3581,7 +3541,7 @@ static int rocksdb_done_func(void *const p) {
// signal the drop index thread to stop
rdb_drop_idx_thread.signal(true);
// Flush all memtables for not lose data, even if WAL is disabled.
// Flush all memtables for not losing data, even if WAL is disabled.
rocksdb_flush_all_memtables();
// Stop all rocksdb background work
......@@ -3652,6 +3612,16 @@ static int rocksdb_done_func(void *const p) {
DBUG_RETURN(error);
}
static inline void rocksdb_smart_seek(bool seek_backward,
rocksdb::Iterator *const iter,
const rocksdb::Slice &key_slice) {
if (seek_backward) {
iter->SeekForPrev(key_slice);
} else {
iter->Seek(key_slice);
}
}
/**
@brief
Example of simple lock controls. The "table_handler" it creates is a
......@@ -3670,7 +3640,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) {
length = (uint)strlen(table_name);
// First, look up the table in the hash map.
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
if (!(table_handler = reinterpret_cast<Rdb_table_handler *>(my_hash_search(
&m_hash, reinterpret_cast<const uchar *>(table_name), length)))) {
// Since we did not find it in the hash map, attempt to create and add it
......@@ -3679,7 +3649,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) {
MYF(MY_WME | MY_ZEROFILL), &table_handler, sizeof(*table_handler),
&tmp_name, length + 1, NullS)))) {
// Allocating a new Rdb_table_handler and a new table name failed.
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return nullptr;
}
......@@ -3690,7 +3660,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) {
if (my_hash_insert(&m_hash, reinterpret_cast<uchar *>(table_handler))) {
// Inserting into the hash map failed.
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
my_free(table_handler);
return nullptr;
}
......@@ -3701,7 +3671,7 @@ Rdb_open_tables_map::get_table_handler(const char *const table_name) {
DBUG_ASSERT(table_handler->m_ref_count >= 0);
table_handler->m_ref_count++;
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return table_handler;
}
......@@ -3715,7 +3685,7 @@ std::vector<std::string> Rdb_open_tables_map::get_table_names(void) const {
const Rdb_table_handler *table_handler;
std::vector<std::string> names;
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
for (i = 0; (table_handler = reinterpret_cast<const Rdb_table_handler *>(
my_hash_const_element(&m_hash, i)));
i++) {
......@@ -3723,7 +3693,7 @@ std::vector<std::string> Rdb_open_tables_map::get_table_names(void) const {
names.push_back(table_handler->m_table_name);
}
DBUG_ASSERT(i == m_hash.records);
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return names;
}
......@@ -3872,7 +3842,7 @@ int ha_rocksdb::read_hidden_pk_id_from_rowkey(longlong *const hidden_pk_id) {
void Rdb_open_tables_map::release_table_handler(
Rdb_table_handler *const table_handler) {
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
DBUG_ASSERT(table_handler != nullptr);
DBUG_ASSERT(table_handler->m_ref_count > 0);
......@@ -3885,7 +3855,7 @@ void Rdb_open_tables_map::release_table_handler(
my_free(table_handler);
}
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
}
static handler *rocksdb_create_handler(my_core::handlerton *const hton,
......@@ -4137,6 +4107,92 @@ int ha_rocksdb::convert_record_from_storage_format(
return convert_record_from_storage_format(key, &retrieved_rec_slice, buf);
}
int ha_rocksdb::convert_blob_from_storage_format(
my_core::Field_blob *const blob,
Rdb_string_reader *const reader,
bool decode)
{
/* Get the number of bytes needed to store length*/
const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr;
const char *data_len_str;
if (!(data_len_str = reader->read(length_bytes))) {
return HA_ERR_INTERNAL_ERROR;
}
memcpy(blob->ptr, data_len_str, length_bytes);
const uint32 data_len = blob->get_length(
reinterpret_cast<const uchar*>(data_len_str), length_bytes,
table->s->db_low_byte_first);
const char *blob_ptr;
if (!(blob_ptr = reader->read(data_len))) {
return HA_ERR_INTERNAL_ERROR;
}
if (decode) {
// set 8-byte pointer to 0, like innodb does (relevant for 32-bit
// platforms)
memset(blob->ptr + length_bytes, 0, 8);
memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar **));
}
return HA_EXIT_SUCCESS;
}
int ha_rocksdb::convert_varchar_from_storage_format(
my_core::Field_varstring *const field_var,
Rdb_string_reader *const reader,
bool decode)
{
const char *data_len_str;
if (!(data_len_str = reader->read(field_var->length_bytes)))
return HA_ERR_INTERNAL_ERROR;
uint data_len;
/* field_var->length_bytes is 1 or 2 */
if (field_var->length_bytes == 1) {
data_len = (uchar)data_len_str[0];
} else {
DBUG_ASSERT(field_var->length_bytes == 2);
data_len = uint2korr(data_len_str);
}
if (data_len > field_var->field_length) {
/* The data on disk is longer than table DDL allows? */
return HA_ERR_INTERNAL_ERROR;
}
if (!reader->read(data_len)) {
return HA_ERR_INTERNAL_ERROR;
}
if (decode) {
memcpy(field_var->ptr, data_len_str, field_var->length_bytes + data_len);
}
return HA_EXIT_SUCCESS;
}
int ha_rocksdb::convert_field_from_storage_format(
my_core::Field *const field,
Rdb_string_reader *const reader,
bool decode,
uint len)
{
const char *data_bytes;
if (len > 0) {
if ((data_bytes = reader->read(len)) == nullptr) {
return HA_ERR_INTERNAL_ERROR;
}
if (decode)
memcpy(field->ptr, data_bytes, len);
}
return HA_EXIT_SUCCESS;
}
/*
@brief
Unpack the record in this->m_retrieved_record and this->m_last_rowkey from
......@@ -4168,7 +4224,6 @@ int ha_rocksdb::convert_record_from_storage_format(
DBUG_ASSERT(buf != nullptr);
Rdb_string_reader reader(value);
const my_ptrdiff_t ptr_diff = buf - table->record[0];
/*
Decode PK fields from the key
......@@ -4208,6 +4263,7 @@ int ha_rocksdb::convert_record_from_storage_format(
return HA_ERR_INTERNAL_ERROR;
}
int err = HA_EXIT_SUCCESS;
for (auto it = m_decoders_vect.begin(); it != m_decoders_vect.end(); it++) {
const Rdb_field_encoder *const field_dec = it->m_field_enc;
const bool decode = it->m_decode;
......@@ -4221,89 +4277,49 @@ int ha_rocksdb::convert_record_from_storage_format(
if (it->m_skip && !reader.read(it->m_skip))
return HA_ERR_INTERNAL_ERROR;
uint field_offset = field->ptr - table->record[0];
uint null_offset = field->null_offset();
bool maybe_null = field->real_maybe_null();
field->move_field(buf + field_offset,
maybe_null ? buf + null_offset : nullptr,
field->null_bit);
// WARNING! - Don't return before restoring field->ptr and field->null_ptr!
if (isNull) {
if (decode) {
/* This sets the NULL-bit of this record */
field->set_null(ptr_diff);
field->set_null();
/*
Besides that, set the field value to default value. CHECKSUM TABLE
depends on this.
*/
uint field_offset = field->ptr - table->record[0];
memcpy(buf + field_offset, table->s->default_values + field_offset,
memcpy(field->ptr, table->s->default_values + field_offset,
field->pack_length());
}
continue;
} else {
if (decode)
field->set_notnull(ptr_diff);
if (decode) {
field->set_notnull();
}
if (field_dec->m_field_type == MYSQL_TYPE_BLOB) {
my_core::Field_blob *const blob = (my_core::Field_blob *)field;
/* Get the number of bytes needed to store length*/
const uint length_bytes = blob->pack_length() - portable_sizeof_char_ptr;
blob->move_field_offset(ptr_diff);
const char *data_len_str;
if (!(data_len_str = reader.read(length_bytes))) {
blob->move_field_offset(-ptr_diff);
return HA_ERR_INTERNAL_ERROR;
}
memcpy(blob->ptr, data_len_str, length_bytes);
const uint32 data_len = blob->get_length(
(uchar *)data_len_str, length_bytes, table->s->db_low_byte_first);
const char *blob_ptr;
if (!(blob_ptr = reader.read(data_len))) {
blob->move_field_offset(-ptr_diff);
return HA_ERR_INTERNAL_ERROR;
}
if (decode) {
// set 8-byte pointer to 0, like innodb does (relevant for 32-bit
// platforms)
memset(blob->ptr + length_bytes, 0, 8);
memcpy(blob->ptr + length_bytes, &blob_ptr, sizeof(uchar **));
blob->move_field_offset(-ptr_diff);
}
err = convert_blob_from_storage_format(
(my_core::Field_blob *) field, &reader, decode);
} else if (field_dec->m_field_type == MYSQL_TYPE_VARCHAR) {
Field_varstring *const field_var = (Field_varstring *)field;
const char *data_len_str;
if (!(data_len_str = reader.read(field_var->length_bytes)))
return HA_ERR_INTERNAL_ERROR;
uint data_len;
/* field_var->length_bytes is 1 or 2 */
if (field_var->length_bytes == 1) {
data_len = (uchar)data_len_str[0];
err = convert_varchar_from_storage_format(
(my_core::Field_varstring *) field, &reader, decode);
} else {
DBUG_ASSERT(field_var->length_bytes == 2);
data_len = uint2korr(data_len_str);
err = convert_field_from_storage_format(
field, &reader, decode, field_dec->m_pack_length_in_rec);
}
if (data_len > field->field_length) {
/* The data on disk is longer than table DDL allows? */
return HA_ERR_INTERNAL_ERROR;
}
if (!reader.read(data_len))
return HA_ERR_INTERNAL_ERROR;
if (decode) {
memcpy(field_var->ptr + ptr_diff, data_len_str,
field_var->length_bytes + data_len);
}
} else {
const char *data_bytes;
const uint len = field_dec->m_pack_length_in_rec;
if (len > 0) {
if ((data_bytes = reader.read(len)) == nullptr) {
return HA_ERR_INTERNAL_ERROR;
}
if (decode)
memcpy(field->ptr + ptr_diff, data_bytes, len);
}
// Restore field->ptr and field->null_ptr
field->move_field(table->record[0] + field_offset,
maybe_null ? table->record[0] + null_offset : nullptr,
field->null_bit);
if (err != HA_EXIT_SUCCESS) {
return err;
}
}
......@@ -4723,6 +4739,14 @@ bool ha_rocksdb::get_error_message(const int error, String *const buf) {
buf->append(tx->m_detailed_error);
temp_error = true;
break;
case HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED:
buf->append("Specifying DATA DIRECTORY for an individual table is not "
"supported.");
break;
case HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED:
buf->append("Specifying INDEX DIRECTORY for an individual table is not "
"supported.");
break;
default:
// We can be called with the values which are < HA_ERR_FIRST because most
// MySQL internal functions will just return HA_EXIT_FAILURE in case of
......@@ -4791,7 +4815,7 @@ int ha_rocksdb::create_key_defs(
*/
if (create_cfs(table_arg, tbl_def_arg, &cfs)) {
DBUG_RETURN(HA_EXIT_FAILURE);
};
}
if (!old_tbl_def_arg) {
/*
......@@ -4845,6 +4869,7 @@ int ha_rocksdb::create_cfs(
DBUG_ASSERT(table_arg != nullptr);
DBUG_ASSERT(table_arg->s != nullptr);
DBUG_ASSERT(tbl_def_arg != nullptr);
char tablename_sys[NAME_LEN + 1];
......@@ -4884,35 +4909,53 @@ int ha_rocksdb::create_cfs(
}
}
/*
index comment has Column Family name. If there was no comment, we get
NULL, and it means use the default column family.
*/
const char *const comment = get_key_comment(i, table_arg, tbl_def_arg);
// Internal consistency check to make sure that data in TABLE and
// Rdb_tbl_def structures matches. Either both are missing or both are
// specified. Yes, this is critical enough to make it into SHIP_ASSERT.
SHIP_ASSERT(!table_arg->part_info == tbl_def_arg->base_partition().empty());
// Generate the name for the column family to use.
bool per_part_match_found = false;
std::string cf_name = generate_cf_name(i, table_arg, tbl_def_arg,
&per_part_match_found);
const char *const key_name = get_key_name(i, table_arg, tbl_def_arg);
if (looks_like_per_index_cf_typo(comment)) {
if (looks_like_per_index_cf_typo(cf_name.c_str())) {
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
"column family name looks like a typo of $per_index_cf");
"column family name looks like a typo of $per_index_cf.");
DBUG_RETURN(HA_EXIT_FAILURE);
}
/* Prevent create from using the system column family */
if (comment && strcmp(DEFAULT_SYSTEM_CF_NAME, comment) == 0) {
// Prevent create from using the system column family.
if (!cf_name.empty() && strcmp(DEFAULT_SYSTEM_CF_NAME,
cf_name.c_str()) == 0) {
my_error(ER_WRONG_ARGUMENTS, MYF(0),
"column family not valid for storing index data");
"column family not valid for storing index data.");
DBUG_RETURN(HA_EXIT_FAILURE);
}
bool is_auto_cf_flag;
// Here's how `get_or_create_cf` will use the input parameters:
//
// `cf_name` - will be used as a CF name.
// `key_name` - will be only used in case of "$per_index_cf".
cf_handle =
cf_manager.get_or_create_cf(rdb, comment, tbl_def_arg->full_tablename(),
key_name, &is_auto_cf_flag);
if (!cf_handle)
cf_manager.get_or_create_cf(rdb, cf_name.c_str(),
tbl_def_arg->full_tablename(), key_name,
&is_auto_cf_flag);
if (!cf_handle) {
DBUG_RETURN(HA_EXIT_FAILURE);
}
auto &cf = (*cfs)[i];
cf.cf_handle = cf_handle;
cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(comment);
cf.is_reverse_cf = Rdb_cf_manager::is_cf_name_reverse(cf_name.c_str());
cf.is_auto_cf = is_auto_cf_flag;
cf.is_per_partition_cf = per_part_match_found;
}
DBUG_RETURN(HA_EXIT_SUCCESS);
......@@ -4982,7 +5025,8 @@ int ha_rocksdb::create_inplace_key_defs(
new_key_descr[i] = std::make_shared<Rdb_key_def>(
okd.get_index_number(), i, okd.get_cf(), index_dict_version,
index_type, kv_version, okd.m_is_reverse_cf, okd.m_is_auto_cf,
okd.m_name.c_str(), dict_manager.get_stats(gl_index_id));
okd.m_is_per_partition_cf, okd.m_name.c_str(),
dict_manager.get_stats(gl_index_id));
} else if (create_key_def(table_arg, i, tbl_def_arg, &new_key_descr[i],
cfs[i])) {
DBUG_RETURN(HA_EXIT_FAILURE);
......@@ -5118,7 +5162,8 @@ int ha_rocksdb::create_key_def(const TABLE *const table_arg, const uint &i,
const char *const key_name = get_key_name(i, table_arg, m_tbl_def);
*new_key_def = std::make_shared<Rdb_key_def>(
index_id, i, cf_info.cf_handle, index_dict_version, index_type,
kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf, key_name);
kv_version, cf_info.is_reverse_cf, cf_info.is_auto_cf,
cf_info.is_per_partition_cf, key_name);
DBUG_RETURN(HA_EXIT_SUCCESS);
}
......@@ -5271,6 +5316,22 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg,
HA_CREATE_INFO *const create_info) {
DBUG_ENTER_FUNC();
DBUG_ASSERT(table_arg != nullptr);
DBUG_ASSERT(create_info != nullptr);
if (create_info->data_file_name) {
// DATA DIRECTORY is used to create tables under a specific location
// outside the MySQL data directory. We don't support this for MyRocks.
// The `rocksdb_datadir` setting should be used to configure RocksDB data
// directory.
DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED);
}
if (create_info->index_file_name) {
// Similar check for INDEX DIRECTORY as well.
DBUG_RETURN(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED);
}
int res;
std::string str;
......@@ -5441,15 +5502,7 @@ int ha_rocksdb::read_key_exact(const Rdb_key_def &kd,
index_tuple= lookup_tuple.
lookup_tuple may be a prefix of the index.
*/
if (kd.m_is_reverse_cf) {
if (!full_key_match) {
if (!iter->Valid())
iter->SeekToLast();
else
iter->Prev();
}
}
rocksdb_smart_seek(kd.m_is_reverse_cf, iter, key_slice);
if (!iter->Valid() || !kd.value_matches_prefix(iter->key(), key_slice)) {
/*
Got a record that is not equal to the lookup value, or even a record
......@@ -5467,24 +5520,20 @@ int ha_rocksdb::read_before_key(const Rdb_key_def &kd,
We are looking for record with the biggest t.key such that
t.key < lookup_tuple.
*/
if (kd.m_is_reverse_cf) {
rocksdb_smart_seek(!kd.m_is_reverse_cf, m_scan_it, key_slice);
if (m_scan_it->Valid() && full_key_match &&
kd.value_matches_prefix(m_scan_it->key(), key_slice)) {
/* We are using full key and we've hit an exact match */
if (kd.m_is_reverse_cf) {
m_scan_it->Next();
}
} else {
if (m_scan_it->Valid())
m_scan_it->Prev();
else
m_scan_it->SeekToLast();
}
}
return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND;
}
int ha_rocksdb::read_after_key(const Rdb_key_def &kd,
const bool &full_key_match,
const rocksdb::Slice &key_slice) {
/*
We are looking for the first record such that
......@@ -5494,22 +5543,7 @@ int ha_rocksdb::read_after_key(const Rdb_key_def &kd,
with HA_READ_AFTER_KEY, $GT = '>',
with HA_READ_KEY_OR_NEXT, $GT = '>='
*/
if (kd.m_is_reverse_cf) {
if (!m_scan_it->Valid()) {
m_scan_it->SeekToLast();
} else {
/*
We should step back
- when not using full extended key
- when using full extended key and when we've got an exact match
*/
if (!full_key_match ||
!kd.value_matches_prefix(m_scan_it->key(), key_slice)) {
m_scan_it->Prev();
}
}
}
rocksdb_smart_seek(kd.m_is_reverse_cf, m_scan_it, key_slice);
return m_scan_it->Valid() ? HA_EXIT_SUCCESS : HA_ERR_KEY_NOT_FOUND;
}
......@@ -5538,7 +5572,7 @@ int ha_rocksdb::position_to_correct_key(const Rdb_key_def &kd,
break;
case HA_READ_AFTER_KEY:
case HA_READ_KEY_OR_NEXT:
rc = read_after_key(kd, full_key_match, key_slice);
rc = read_after_key(kd, key_slice);
if (rc == 0 && !kd.covers_key(m_scan_it->key())) {
/* The record we've got is not from this index */
rc = HA_ERR_KEY_NOT_FOUND;
......@@ -5905,8 +5939,10 @@ int ha_rocksdb::index_read_map_impl(uchar *const buf, const uchar *const key,
*/
const uint size = kd.pack_index_tuple(table, m_pack_buffer,
m_pk_packed_tuple, key, keypart_map);
rc = get_row_by_rowid(buf, m_pk_packed_tuple, size);
if (!rc) {
bool skip_lookup = is_blind_delete_enabled();
rc = get_row_by_rowid(buf, m_pk_packed_tuple, size,
skip_lookup);
if (!rc && !skip_lookup) {
stats.rows_read++;
stats.rows_index_first++;
update_row_stats(ROWS_READ);
......@@ -6339,6 +6375,17 @@ rocksdb::Status ha_rocksdb::get_for_update(
return s;
}
bool ha_rocksdb::is_blind_delete_enabled()
{
THD *thd = ha_thd();
return (THDVAR(thd, blind_delete_primary_key) &&
thd->lex->sql_command == SQLCOM_DELETE &&
thd->lex->table_count == 1 &&
table->s->keys == 1 &&
!has_hidden_pk(table) &&
!thd->rli_slave);
}
/*
Given a rowid (i.e. packed PK) as a parameter, get the record.
......@@ -6348,7 +6395,8 @@ rocksdb::Status ha_rocksdb::get_for_update(
*/
int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid,
const uint rowid_size) {
const uint rowid_size,
const bool skip_lookup) {
DBUG_ENTER_FUNC();
DBUG_ASSERT(buf != nullptr);
......@@ -6374,6 +6422,16 @@ int ha_rocksdb::get_row_by_rowid(uchar *const buf, const char *const rowid,
bool found;
rocksdb::Status s;
/* Pretend row found without looking up */
if (skip_lookup)
{
stats.rows_deleted_blind++;
update_row_stats(ROWS_DELETED_BLIND);
m_last_rowkey.copy((const char *)rowid, rowid_size, &my_charset_bin);
table->status = 0;
DBUG_RETURN(0);
}
if (m_lock_rows == RDB_LOCK_NONE) {
tx->acquire_snapshot(true);
s = tx->get(m_pk_descr->get_cf(), key_slice, &m_retrieved_record);
......@@ -6554,6 +6612,7 @@ int ha_rocksdb::index_first_intern(uchar *const buf) {
for (;;) {
setup_scan_iterator(kd, &index_key, false, !kd.m_is_reverse_cf,
Rdb_key_def::INDEX_NUMBER_SIZE);
m_scan_it->Seek(index_key);
m_skip_scan_it_next_call = true;
rc = index_next_with_direction(buf, true);
......@@ -6649,21 +6708,7 @@ int ha_rocksdb::index_last_intern(uchar *const buf) {
for (;;) {
setup_scan_iterator(kd, &index_key, false, kd.m_is_reverse_cf,
Rdb_key_def::INDEX_NUMBER_SIZE);
if (!m_scan_it->Valid()) {
/*
RocksDB keyspace has no records that follow our index. It could be, our
index is the last in the keyspace. Navigate to last record in the
keyspace.
*/
m_scan_it->SeekToLast();
} else {
/*
Iterator is pointing at some record right after the end of the index.
Move one record back.
*/
m_scan_it->Prev();
}
m_scan_it->SeekForPrev(index_key);
m_skip_scan_it_next_call = false;
if (is_pk(active_index, table, m_tbl_def)) {
......@@ -6726,11 +6771,15 @@ bool ha_rocksdb::skip_unique_check() const {
2) this table is in the whitelist of tables to skip and the replication
lag has reached a large enough value (see unique_check_lag_threshold
and unique_check_lage_reset_threshold)
3) the user set unique_checks option to 0
3) the user set unique_checks option to 0, and the table does not have
any secondary indexes. If the table has secondary keys, then those
might become inconsistent/corrupted
*/
return THDVAR(table->in_use, bulk_load) ||
(m_force_skip_unique_check && m_skip_unique_check) ||
my_core::thd_test_options(table->in_use, OPTION_RELAXED_UNIQUE_CHECKS);
(my_core::thd_test_options(table->in_use,
OPTION_RELAXED_UNIQUE_CHECKS) &&
m_tbl_def->m_key_count == 1);
}
void ha_rocksdb::set_force_skip_unique_check(bool skip) {
......@@ -6805,15 +6854,31 @@ bool ha_rocksdb::is_pk(const uint index, const TABLE *const table_arg,
is_hidden_pk(index, table_arg, tbl_def_arg);
}
/*
  Compose the prefix used to search an index comment for a per-partition
  column family assignment. For partition prefix "p3" this yields
  "p3_cfname=" (name separator, qualifier name, value separator).
*/
const std::string ha_rocksdb::gen_cf_name_qualifier_for_partition(
    const std::string& prefix) {
  DBUG_ASSERT(!prefix.empty());

  std::string qualifier = prefix;
  qualifier += RDB_PER_PARTITION_QUALIFIER_NAME_SEP;
  qualifier += RDB_CF_NAME_QUALIFIER;
  qualifier += RDB_PER_PARTITION_QUALIFIER_VALUE_SEP;
  return qualifier;
}
/*
  Return the name of the key at position `index`: the reserved hidden-PK
  name when the key is the auto-generated hidden primary key, otherwise
  the name recorded in the table's key_info array.
*/
const char *ha_rocksdb::get_key_name(const uint index,
                                     const TABLE *const table_arg,
                                     const Rdb_tbl_def *const tbl_def_arg) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);

  if (is_hidden_pk(index, table_arg, tbl_def_arg)) {
    return HIDDEN_PK_NAME;
  }

  DBUG_ASSERT(table_arg->key_info != nullptr);
  const auto &key_info = table_arg->key_info[index];
  DBUG_ASSERT(key_info.name != nullptr);
  return key_info.name;
}
......@@ -6821,14 +6886,84 @@ const char *ha_rocksdb::get_key_comment(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg) {
DBUG_ASSERT(table_arg != nullptr);
DBUG_ASSERT(tbl_def_arg != nullptr);
if (is_hidden_pk(index, table_arg, tbl_def_arg)) {
return nullptr;
}
DBUG_ASSERT(table_arg->key_info != nullptr);
return table_arg->key_info[index].comment.str;
}
/*
  Determine which column family name to use for the key at position `index`.

  The index comment is the column family specification. For non-partitioned
  tables the whole comment is the CF name (an empty comment means "use the
  default column family"). For partitioned tables the comment may hold
  per-partition assignments of the form "p0_cfname=foo;p1_cfname=bar"; when
  an assignment matching this table's base partition is found, its value is
  returned and *per_part_match_found is set.

  @param index                 key position inside the table's key array
  @param table_arg             table metadata, must not be nullptr
  @param tbl_def_arg           MyRocks table definition, must not be nullptr
  @param[out] per_part_match_found  set to true only when a per-partition
                                    CF assignment matched
  @return CF name to use; an empty string selects the default column family
*/
const std::string ha_rocksdb::generate_cf_name(const uint index,
                                               const TABLE *const table_arg,
                                               const Rdb_tbl_def *const tbl_def_arg,
                                               bool *per_part_match_found) {
  DBUG_ASSERT(table_arg != nullptr);
  DBUG_ASSERT(tbl_def_arg != nullptr);
  DBUG_ASSERT(per_part_match_found != nullptr);

  // When creating CF-s the caller needs to know if there was a custom CF name
  // specified for a given partition.
  *per_part_match_found = false;

  // Index comment is used to define the column family name specification(s).
  // If there was no comment, we get an empty string, and it means "use the
  // default column family".
  const char *const comment = get_key_comment(index, table_arg, tbl_def_arg);

  // `get_key_comment` can return `nullptr`, that's why this.
  std::string key_comment = comment ? comment : "";

  // If table has partitions then we need to check if user has requested to
  // create a column family with a specific name on a per partition basis.
  if (table_arg->part_info != nullptr) {
    std::string partition_name = tbl_def_arg->base_partition();
    DBUG_ASSERT(!partition_name.empty());

    // Fetch the comment for an index and check if there's a custom key
    // name specified for the partition we are handling.
    std::vector<std::string> v = myrocks::parse_into_tokens(key_comment,
                                                            RDB_QUALIFIER_SEP);
    std::string part_to_search = gen_cf_name_qualifier_for_partition(
        partition_name);
    DBUG_ASSERT(!part_to_search.empty());

    // Basic O(N) search for a matching assignment. At most we expect maybe
    // ten or so elements here.
    for (const auto &it : v) {
      if (it.substr(0, part_to_search.length()) == part_to_search) {
        // We found a prefix match. Try to parse it as an assignment.
        std::vector<std::string> tokens = myrocks::parse_into_tokens(it,
            RDB_PER_PARTITION_QUALIFIER_VALUE_SEP);

        // We found a custom name, it was in the form we expected it to be.
        // Return that instead of whatever we initially wanted to return. In
        // a case below the `foo` part will be returned to the caller.
        //
        // p3_cfname=foo
        //
        // If no value was specified then we'll return an empty string which
        // later gets translated into using a default CF.
        if (tokens.size() == 2) {
          *per_part_match_found = true;
          return tokens[1];
        } else {
          return "";
        }
      }
    }

    // At this point we tried to search for a custom CF name for a partition,
    // but none was specified. Therefore default one will be used.
    return "";
  }

  return key_comment;
}
int ha_rocksdb::write_row(uchar *const buf) {
DBUG_ENTER_FUNC();
......@@ -7188,7 +7323,7 @@ int ha_rocksdb::finalize_bulk_load() {
return rc;
}
mysql_mutex_lock(&m_bulk_load_mutex);
RDB_MUTEX_LOCK_CHECK(m_bulk_load_mutex);
/*
We need this check because it's possible that m_sst_info has been
......@@ -7203,7 +7338,8 @@ int ha_rocksdb::finalize_bulk_load() {
mysql prints via my_printf_error.
*/
sql_print_error("Failed to commit bulk loaded sst file to the "
"data store (%s)", m_sst_info->error_message().c_str());
"data store (%s)",
m_sst_info->error_message().c_str());
my_printf_error(ER_UNKNOWN_ERROR,
"Failed to commit bulk loaded sst file to the "
......@@ -7217,7 +7353,8 @@ int ha_rocksdb::finalize_bulk_load() {
m_bulk_load_tx = nullptr;
}
mysql_mutex_unlock(&m_bulk_load_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_bulk_load_mutex);
return rc;
}
......@@ -7462,7 +7599,7 @@ int ha_rocksdb::update_write_row(const uchar *const old_data,
}
/*
Open a cursor and position it at the passed record
Open a cursor
*/
void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd,
......@@ -7522,12 +7659,6 @@ void ha_rocksdb::setup_scan_iterator(const Rdb_key_def &kd,
}
m_scan_it_skips_bloom = skip_bloom;
}
/*
Seek() will "Position at the first key in the source that at or past
target".
The operation cannot fail.
*/
m_scan_it->Seek(*slice);
}
void ha_rocksdb::release_scan_iterator() {
......@@ -7551,6 +7682,7 @@ void ha_rocksdb::setup_iterator_for_rnd_scan() {
rocksdb::Slice table_key((const char *)m_pk_packed_tuple, key_size);
setup_scan_iterator(*m_pk_descr, &table_key);
m_scan_it->Seek(table_key);
m_skip_scan_it_next_call = true;
}
......@@ -7859,20 +7991,33 @@ int ha_rocksdb::info(uint flag) {
update_stats();
}
if (stats.records == 0) {
// most likely, the table is in memtable
// try to deduce from GetApproximateSizes
// if number of records is hardcoded, we do not want to force computation
// of memtable cardinalities
if (stats.records == 0 ||
(rocksdb_force_compute_memtable_stats &&
rocksdb_debug_optimizer_n_rows == 0))
{
// First, compute SST files stats
uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2];
auto r = get_range(pk_index(table, m_tbl_def), buf);
uint64_t sz = 0;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz, true);
#pragma GCC diagnostic pop
uint8_t include_flags = rocksdb::DB::INCLUDE_FILES;
// recompute SST files stats only if records count is 0
if (stats.records == 0) {
rdb->GetApproximateSizes(m_pk_descr->get_cf(), &r, 1, &sz,
include_flags);
stats.records+= sz/ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE;
stats.data_file_length+= sz;
}
stats.records = sz / ROCKSDB_ASSUMED_KEY_VALUE_DISK_SIZE;
stats.data_file_length = sz;
// Second, compute memtable stats
uint64_t memtableCount;
uint64_t memtableSize;
rdb->GetApproximateMemTableStats(m_pk_descr->get_cf(), r,
&memtableCount, &memtableSize);
stats.records += memtableCount;
stats.data_file_length += memtableSize;
if (rocksdb_debug_optimizer_n_rows > 0)
stats.records = rocksdb_debug_optimizer_n_rows;
......@@ -8301,12 +8446,36 @@ ha_rocksdb::get_range(const int &i,
return myrocks::get_range(*m_key_descr_arr[i], buf);
}
/*
  Report whether the keyspace of the given index id holds no records.
  Seeks (direction-aware for reverse column families) to the first key at or
  past the 4-byte index-number prefix; the index is considered empty when no
  such key exists or the first key found no longer carries that prefix.
*/
static bool is_myrocks_index_empty(
  rocksdb::ColumnFamilyHandle *cfh, const bool is_reverse_cf,
  const rocksdb::ReadOptions &read_opts,
  const uint index_id)
{
  uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0};
  rdb_netbuf_store_uint32(key_buf, index_id);
  const rocksdb::Slice key =
      rocksdb::Slice(reinterpret_cast<char *>(key_buf), sizeof(key_buf));

  std::unique_ptr<rocksdb::Iterator> it(rdb->NewIterator(read_opts, cfh));
  rocksdb_smart_seek(is_reverse_cf, it.get(), key);

  if (!it->Valid()) {
    // Nothing at or past the prefix: the index has been fully removed.
    return true;
  }
  // Non-zero memcmp means the first record belongs to a different index.
  return memcmp(it->key().data(), key_buf,
                Rdb_key_def::INDEX_NUMBER_SIZE) != 0;
}
/*
Drop index thread's main logic
*/
void Rdb_drop_index_thread::run() {
mysql_mutex_lock(&m_signal_mutex);
RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
for (;;) {
// The stop flag might be set by shutdown command
......@@ -8331,7 +8500,7 @@ void Rdb_drop_index_thread::run() {
}
// make sure, no program error is returned
DBUG_ASSERT(ret == 0 || ret == ETIMEDOUT);
mysql_mutex_unlock(&m_signal_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
std::unordered_set<GL_INDEX_ID> indices;
dict_manager.get_ongoing_drop_indexes(&indices);
......@@ -8353,11 +8522,11 @@ void Rdb_drop_index_thread::run() {
DBUG_ASSERT(cfh);
const bool is_reverse_cf = cf_flags & Rdb_key_def::REVERSE_CF_FLAG;
bool index_removed = false;
uchar key_buf[Rdb_key_def::INDEX_NUMBER_SIZE] = {0};
rdb_netbuf_store_uint32(key_buf, d.index_id);
const rocksdb::Slice key =
rocksdb::Slice((char *)key_buf, sizeof(key_buf));
if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts, d.index_id))
{
finished.insert(d);
continue;
}
uchar buf[Rdb_key_def::INDEX_NUMBER_SIZE * 2];
rocksdb::Range range = get_range(d.index_id, buf, is_reverse_cf ? 1 : 0,
is_reverse_cf ? 0 : 1);
......@@ -8381,25 +8550,8 @@ void Rdb_drop_index_thread::run() {
}
rdb_handle_io_error(status, RDB_IO_ERROR_BG_THREAD);
}
std::unique_ptr<rocksdb::Iterator> it(rdb->NewIterator(read_opts, cfh));
it->Seek(key);
if (is_reverse_cf) {
if (!it->Valid()) {
it->SeekToLast();
} else {
it->Prev();
}
}
if (!it->Valid()) {
index_removed = true;
} else {
if (memcmp(it->key().data(), key_buf,
Rdb_key_def::INDEX_NUMBER_SIZE)) {
// Key does not have same prefix
index_removed = true;
}
}
if (index_removed) {
if (is_myrocks_index_empty(cfh, is_reverse_cf, read_opts, d.index_id))
{
finished.insert(d);
}
}
......@@ -8408,10 +8560,10 @@ void Rdb_drop_index_thread::run() {
dict_manager.finish_drop_indexes(finished);
}
}
mysql_mutex_lock(&m_signal_mutex);
RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
}
mysql_mutex_unlock(&m_signal_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
}
Rdb_tbl_def *ha_rocksdb::get_table_if_exists(const char *const tablename) {
......@@ -8683,10 +8835,12 @@ ha_rows ha_rocksdb::records_in_range(uint inx, key_range *const min_key,
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
// Getting statistics, including from Memtables
rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, true);
#pragma GCC diagnostic pop
uint8_t include_flags = rocksdb::DB::INCLUDE_FILES;
rdb->GetApproximateSizes(kd.get_cf(), &r, 1, &sz, include_flags);
ret = rows * sz / disk_size;
uint64_t memTableCount;
rdb->GetApproximateMemTableStats(kd.get_cf(), r, &memTableCount, &sz);
ret += memTableCount;
/*
GetApproximateSizes() gives estimates so ret might exceed stats.records.
......@@ -8764,6 +8918,7 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd,
std::unordered_map<rocksdb::ColumnFamilyHandle *, std::vector<rocksdb::Range>>
ranges;
std::unordered_set<GL_INDEX_ID> ids_to_check;
std::unordered_map<GL_INDEX_ID, uint> ids_to_keyparts;
std::vector<uchar> buf(table_arg->s->keys * 2 *
Rdb_key_def::INDEX_NUMBER_SIZE);
for (uint i = 0; i < table_arg->s->keys; i++) {
......@@ -8771,6 +8926,7 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd,
const Rdb_key_def &kd = *m_key_descr_arr[i];
ranges[kd.get_cf()].push_back(get_range(i, bufp));
ids_to_check.insert(kd.get_gl_index_id());
ids_to_keyparts[kd.get_gl_index_id()] = kd.get_key_parts();
}
// for analyze statements, force flush on memtable to get accurate cardinality
......@@ -8800,6 +8956,8 @@ int ha_rocksdb::calculate_stats(const TABLE *const table_arg, THD *const thd,
// Initialize the stats to 0. If there are no files that contain
// this gl_index_id, then 0 should be stored for the cached stats.
stats[it] = Rdb_index_stats(it);
DBUG_ASSERT(ids_to_keyparts.count(it) > 0);
stats[it].m_distinct_keys_per_prefix.resize(ids_to_keyparts[it]);
}
for (const auto &it : props) {
std::vector<Rdb_index_stats> sst_stats;
......@@ -9271,6 +9429,15 @@ int ha_rocksdb::inplace_populate_sk(
dict_manager.add_create_index(create_index_ids, batch);
dict_manager.commit(batch);
/*
Add uncommitted key definitons to ddl_manager. We need to do this
so that the property collector can find this keydef when it needs to
update stats. The property collector looks for the keydef in the
data dictionary, but it won't be there yet since this key definition
is still in the creation process.
*/
ddl_manager.add_uncommitted_keydefs(indexes);
const bool hidden_pk_exists = has_hidden_pk(table);
int res = 0;
......@@ -9489,6 +9656,8 @@ bool ha_rocksdb::commit_inplace_alter_table(
Rdb_inplace_alter_ctx *const ctx0 =
static_cast<Rdb_inplace_alter_ctx *>(ha_alter_info->handler_ctx);
DEBUG_SYNC(ha_thd(), "rocksdb.commit_in_place_alter_table");
/*
IMPORTANT: When rollback is requested, mysql will abort with
an assertion failure. That means every failed commit during inplace alter
......@@ -9523,6 +9692,9 @@ bool ha_rocksdb::commit_inplace_alter_table(
delete ctx0->m_new_tdef;
}
/* Remove uncommitted key definitons from ddl_manager */
ddl_manager.remove_uncommitted_keydefs(ctx0->m_added_indexes);
/* Rollback any partially created indexes */
dict_manager.rollback_ongoing_index_creation();
......@@ -9582,6 +9754,12 @@ bool ha_rocksdb::commit_inplace_alter_table(
*/
DBUG_ASSERT(0);
}
/*
Remove uncommitted key definitons from ddl_manager, as they are now
committed into the data dictionary.
*/
ddl_manager.remove_uncommitted_keydefs(ctx->m_added_indexes);
}
if (dict_manager.commit(batch)) {
......@@ -9750,6 +9928,7 @@ static void myrocks_update_status() {
export_stats.rows_inserted = global_stats.rows[ROWS_INSERTED];
export_stats.rows_read = global_stats.rows[ROWS_READ];
export_stats.rows_updated = global_stats.rows[ROWS_UPDATED];
export_stats.rows_deleted_blind = global_stats.rows[ROWS_DELETED_BLIND];
export_stats.system_rows_deleted = global_stats.system_rows[ROWS_DELETED];
export_stats.system_rows_inserted = global_stats.system_rows[ROWS_INSERTED];
......@@ -9765,6 +9944,8 @@ static SHOW_VAR myrocks_status_variables[] = {
DEF_STATUS_VAR_FUNC("rows_read", &export_stats.rows_read, SHOW_LONGLONG),
DEF_STATUS_VAR_FUNC("rows_updated", &export_stats.rows_updated,
SHOW_LONGLONG),
DEF_STATUS_VAR_FUNC("rows_deleted_blind",
&export_stats.rows_deleted_blind, SHOW_LONGLONG),
DEF_STATUS_VAR_FUNC("system_rows_deleted",
&export_stats.system_rows_deleted, SHOW_LONGLONG),
DEF_STATUS_VAR_FUNC("system_rows_inserted",
......@@ -9870,7 +10051,7 @@ void Rdb_background_thread::run() {
// Wait until the next timeout or until we receive a signal to stop the
// thread. Request to stop the thread should only be triggered when the
// storage engine is being unloaded.
mysql_mutex_lock(&m_signal_mutex);
RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
const auto ret MY_ATTRIBUTE((__unused__)) =
mysql_cond_timedwait(&m_signal_cond, &m_signal_mutex, &ts_next_sync);
......@@ -9879,7 +10060,7 @@ void Rdb_background_thread::run() {
const bool local_stop = m_stop;
const bool local_save_stats = m_save_stats;
reset();
mysql_mutex_unlock(&m_signal_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
if (local_stop) {
// If we're here then that's because condition variable was signaled by
......@@ -9963,11 +10144,8 @@ bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd,
shorter require all parts of the key to be available
for the short key match.
*/
if (use_all_keys && prefix_extractor->InRange(eq_cond))
can_use = true;
else if (!is_ascending)
can_use = false;
else if (prefix_extractor->SameResultWhenAppended(eq_cond))
if ((use_all_keys && prefix_extractor->InRange(eq_cond))
|| prefix_extractor->SameResultWhenAppended(eq_cond))
can_use = true;
else
can_use = false;
......@@ -10138,7 +10316,7 @@ void rocksdb_set_table_stats_sampling_pct(
my_core::THD *const thd MY_ATTRIBUTE((__unused__)),
my_core::st_mysql_sys_var *const var MY_ATTRIBUTE((__unused__)),
void *const var_ptr MY_ATTRIBUTE((__unused__)), const void *const save) {
mysql_mutex_lock(&rdb_sysvars_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex);
const uint32_t new_val = *static_cast<const uint32_t *>(save);
......@@ -10151,7 +10329,7 @@ void rocksdb_set_table_stats_sampling_pct(
}
}
mysql_mutex_unlock(&rdb_sysvars_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex);
}
/*
......@@ -10185,6 +10363,15 @@ void rocksdb_set_rate_limiter_bytes_per_sec(
}
}
/*
  Sysvar update callback for rocksdb_delayed_write_rate: mirrors the newly
  saved value into both the global variable and the cached DB options.
  No-op when the value is unchanged.
  NOTE(review): unlike the other setters in this file, this one does not take
  rdb_sysvars_mutex -- confirm that is intentional.
*/
void rocksdb_set_delayed_write_rate(THD *thd, struct st_mysql_sys_var *var,
                                    void *var_ptr, const void *save) {
  const uint64_t requested = *static_cast<const uint64_t *>(save);
  if (rocksdb_delayed_write_rate == requested) {
    return;  // Value unchanged; nothing to update.
  }
  rocksdb_delayed_write_rate = requested;
  rocksdb_db_options.delayed_write_rate = requested;
}
void rdb_set_collation_exception_list(const char *const exception_list) {
DBUG_ASSERT(rdb_collation_exceptions != nullptr);
......@@ -10200,7 +10387,7 @@ void rocksdb_set_collation_exception_list(THD *const thd,
const void *const save) {
const char *const val = *static_cast<const char *const *>(save);
rdb_set_collation_exception_list(val);
rdb_set_collation_exception_list(val == nullptr ? "" : val);
*static_cast<const char **>(var_ptr) = val;
}
......@@ -10229,13 +10416,15 @@ static void rocksdb_set_max_background_compactions(
const void *const save) {
DBUG_ASSERT(save != nullptr);
mysql_mutex_lock(&rdb_sysvars_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_sysvars_mutex);
rocksdb_db_options.max_background_compactions =
*static_cast<const int *>(save);
rocksdb_db_options.env->SetBackgroundThreads(
rocksdb_db_options.max_background_compactions,
rocksdb::Env::Priority::LOW);
mysql_mutex_unlock(&rdb_sysvars_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_sysvars_mutex);
}
void rdb_queue_save_stats_request() { rdb_bg_thread.request_save_stats(); }
......
......@@ -121,6 +121,34 @@ const char *const BG_THREAD_NAME = "myrocks-bg";
*/
const char *const INDEX_THREAD_NAME = "myrocks-index";
/*
Separator between partition name and the qualifier. Sample usage:
- p0_cfname=foo
- p3_tts_col=bar
*/
const char RDB_PER_PARTITION_QUALIFIER_NAME_SEP = '_';
/*
Separator between qualifier name and value. Sample usage:
- p0_cfname=foo
- p3_tts_col=bar
*/
const char RDB_PER_PARTITION_QUALIFIER_VALUE_SEP = '=';
/*
Separator between multiple qualifier assignments. Sample usage:
- p0_cfname=foo;p1_cfname=bar;p2_cfname=baz
*/
const char RDB_QUALIFIER_SEP = ';';
/*
Qualifier name for a custom per partition column family.
*/
const char *const RDB_CF_NAME_QUALIFIER = "cfname";
/*
Default, minimal valid, and maximum valid sampling rate values when collecting
statistics about table.
......@@ -192,7 +220,9 @@ const char *const INDEX_THREAD_NAME = "myrocks-index";
#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1)
#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2)
#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS
#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 4)
#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 5)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED
inline bool looks_like_per_index_cf_typo(const char *const name) {
return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME));
......@@ -258,6 +288,7 @@ enum operation_type {
ROWS_INSERTED,
ROWS_READ,
ROWS_UPDATED,
ROWS_DELETED_BLIND,
ROWS_MAX
};
......@@ -282,6 +313,7 @@ struct st_export_stats {
ulonglong rows_inserted;
ulonglong rows_read;
ulonglong rows_updated;
ulonglong rows_deleted_blind;
ulonglong system_rows_deleted;
ulonglong system_rows_inserted;
......@@ -474,13 +506,13 @@ class ha_rocksdb : public my_core::handler {
const rocksdb::Slice &key, std::string *const value) const;
int get_row_by_rowid(uchar *const buf, const char *const rowid,
const uint rowid_size)
const uint rowid_size, const bool skip_lookup = false)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int get_row_by_rowid(uchar *const buf, const uchar *const rowid,
const uint rowid_size)
const uint rowid_size, const bool skip_lookup = false)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) {
return get_row_by_rowid(buf, reinterpret_cast<const char *>(rowid),
rowid_size);
rowid_size, skip_lookup);
}
void update_auto_incr_val();
......@@ -491,6 +523,7 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
bool can_use_single_delete(const uint &index) const
MY_ATTRIBUTE((__warn_unused_result__));
bool is_blind_delete_enabled();
bool skip_unique_check() const MY_ATTRIBUTE((__warn_unused_result__));
void set_force_skip_unique_check(bool skip) override;
bool commit_in_the_middle() MY_ATTRIBUTE((__warn_unused_result__));
......@@ -667,6 +700,21 @@ class ha_rocksdb : public my_core::handler {
int rename_table(const char *const from, const char *const to) override
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_blob_from_storage_format(my_core::Field_blob *const blob,
Rdb_string_reader *const reader,
bool decode)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_varchar_from_storage_format(
my_core::Field_varstring *const field_var,
Rdb_string_reader *const reader, bool decode)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_field_from_storage_format(my_core::Field *const field,
Rdb_string_reader *const reader,
bool decode, uint len)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_record_from_storage_format(const rocksdb::Slice *const key,
const rocksdb::Slice *const value,
uchar *const buf)
......@@ -681,6 +729,17 @@ class ha_rocksdb : public my_core::handler {
rocksdb::Slice *const packed_rec)
MY_ATTRIBUTE((__nonnull__));
static const std::string gen_cf_name_qualifier_for_partition(
const std::string &s);
static const std::vector<std::string> parse_into_tokens(const std::string &s,
const char delim);
static const std::string generate_cf_name(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg,
bool *per_part_match_found);
static const char *get_key_name(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg)
......@@ -702,7 +761,6 @@ class ha_rocksdb : public my_core::handler {
static bool is_pk(const uint index, const TABLE *table_arg,
const Rdb_tbl_def *tbl_def_arg)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
/** @brief
unireg.cc will call max_supported_record_length(), max_supported_keys(),
max_supported_key_parts(), uint max_supported_key_length()
......@@ -827,6 +885,7 @@ class ha_rocksdb : public my_core::handler {
rocksdb::ColumnFamilyHandle *cf_handle;
bool is_reverse_cf;
bool is_auto_cf;
bool is_per_partition_cf;
};
struct update_row_info {
......@@ -946,10 +1005,8 @@ class ha_rocksdb : public my_core::handler {
int read_before_key(const Rdb_key_def &kd, const bool &using_full_key,
const rocksdb::Slice &key_slice)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int read_after_key(const Rdb_key_def &kd, const bool &using_full_key,
const rocksdb::Slice &key_slice)
int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int position_to_correct_key(
const Rdb_key_def &kd, const enum ha_rkey_function &find_flag,
const bool &full_key_match, const uchar *const key,
......
......@@ -23,6 +23,10 @@ namespace myrocks {
class Rdb_logger : public rocksdb::Logger {
public:
explicit Rdb_logger(const rocksdb::InfoLogLevel log_level =
rocksdb::InfoLogLevel::ERROR_LEVEL)
: m_mysql_log_level(log_level) {}
void Logv(const rocksdb::InfoLogLevel log_level, const char *format,
va_list ap) override {
DBUG_ASSERT(format != nullptr);
......@@ -33,7 +37,7 @@ class Rdb_logger : public rocksdb::Logger {
m_logger->Logv(log_level, format, ap);
}
if (log_level < GetInfoLogLevel()) {
if (log_level < m_mysql_log_level) {
return;
}
......@@ -61,8 +65,21 @@ class Rdb_logger : public rocksdb::Logger {
m_logger = logger;
}
void SetInfoLogLevel(const rocksdb::InfoLogLevel log_level) override {
// The InfoLogLevel for the logger is used by rocksdb to filter
// messages, so it needs to be the lower of the two loggers
rocksdb::InfoLogLevel base_level = log_level;
if (m_logger && m_logger->GetInfoLogLevel() < base_level) {
base_level = m_logger->GetInfoLogLevel();
}
rocksdb::Logger::SetInfoLogLevel(base_level);
m_mysql_log_level = log_level;
}
private:
std::shared_ptr<rocksdb::Logger> m_logger;
rocksdb::InfoLogLevel m_mysql_log_level;
};
} // namespace myrocks
......@@ -5,7 +5,7 @@ USE mysqlslap;
CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb;
# 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
......@@ -18,7 +18,7 @@ case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else '
false
# 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
......@@ -29,7 +29,7 @@ case when variable_value-@c = 0 then 'true' else 'false' end
true
# 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
......@@ -39,6 +39,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
case when variable_value-@c = 0 then 'true' else 'false' end
false
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
DROP TABLE t1;
DROP DATABASE mysqlslap;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed';
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
SET debug_sync= 'now SIGNAL flushed';
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 5 # # # # # 5
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 5 # # # # # 5,5
SET debug_sync='RESET';
DROP TABLE t1;
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key;
set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api;
DROP TABLE IF EXISTS t1,t2;
create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb;
create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
SET session rocksdb_blind_delete_primary_key=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
1000
SELECT count(*) FROM t1;
count(*)
9000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
9000
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
0
SELECT count(*) FROM t2;
count(*)
9000
SET session rocksdb_master_skip_tx_api=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
1000
SELECT count(*) FROM t1;
count(*)
8000
SELECT count(*) FROM t2;
count(*)
8000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
8000
SELECT count(*) FROM t2;
count(*)
8000
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000;
DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000;
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
0
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
DELETE FROM t1 WHERE id = 10;
SELECT count(*) FROM t1;
count(*)
7000
call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*");
call mtr.add_suppression("Slave: Can't find record in 't1'.*");
include/wait_for_slave_sql_error.inc [errno=1032]
set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables;
set global rocksdb_read_free_rpl_tables="t.*";
START SLAVE;
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
7000
set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables;
SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key;
SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api;
DROP TABLE t1, t2;
include/rpl_end.inc
DROP TABLE IF EXISTS t1, t2;
DROP TABLE IF EXISTS t1, t2, t3;
CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'
......@@ -19,9 +19,9 @@ LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
......@@ -29,36 +29,36 @@ test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
select count(pk) from t1;
count(pk)
10000000
5000000
select count(a) from t1;
count(a)
10000000
5000000
select count(b) from t1;
count(b)
10000000
5000000
select count(pk) from t2;
count(pk)
10000000
5000000
select count(a) from t2;
count(a)
10000000
5000000
select count(b) from t2;
count(b)
10000000
5000000
select count(pk) from t3;
count(pk)
10000000
5000000
select count(a) from t3;
count(a)
10000000
5000000
select count(b) from t3;
count(b)
10000000
5000000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
DROP TABLE t1, t2, t3;
......@@ -125,4 +125,5 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro
CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE abc;
SET GLOBAL rocksdb_strict_collation_exceptions=null;
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB;
ERROR HY000: Incorrect arguments to column family not valid for storing index data
ERROR HY000: Incorrect arguments to column family not valid for storing index data.
DROP TABLE IF EXISTS t1;
......@@ -66,7 +66,7 @@ Handler_read_prev 0
Handler_read_rnd 0
Handler_read_rnd_next 10
FLUSH STATUS;
SELECT * FROM t1 WHERE b <=5 ORDER BY b;
SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b;
id a b
4 NULL 4
5 NULL 5
......
......@@ -22,7 +22,7 @@ insert into linktable (id1, link_type, id2) values (2, 1, 7);
insert into linktable (id1, link_type, id2) values (2, 1, 8);
insert into linktable (id1, link_type, id2) values (2, 1, 9);
insert into linktable (id1, link_type, id2) values (2, 1, 10);
explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL # Using where
1 SIMPLE linktable range PRIMARY PRIMARY 24 NULL # Using where
drop table linktable;
drop table if exists t;
Warnings:
Note 1051 Unknown table 'test.t'
create table t (
a int,
b int,
c varchar(12249) collate latin1_bin,
d datetime,
e int,
f int,
g blob,
h int,
i int,
key (b,e),
key (h,b)
) engine=rocksdb
partition by linear hash (i) partitions 8 ;
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
select i from t group by h;
i
1
select i from t group by h;
i
1
drop table t;
......@@ -124,6 +124,51 @@ UNLOCK TABLES;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
2
==== mysqldump with --innodb-stats-on-metadata ====
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893;
DROP TABLE IF EXISTS `r1`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `r1` (
`id1` int(11) NOT NULL DEFAULT '0',
`id2` int(11) NOT NULL DEFAULT '0',
`id3` varchar(100) NOT NULL DEFAULT '',
`id4` int(11) NOT NULL DEFAULT '0',
`value1` int(11) DEFAULT NULL,
`value2` int(11) DEFAULT NULL,
`value3` int(11) DEFAULT NULL,
`value4` int(11) DEFAULT NULL,
PRIMARY KEY (`id1`,`id2`,`id3`,`id4`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;
/* ORDERING KEY : (null) */;
LOCK TABLES `r1` WRITE;
/*!40000 ALTER TABLE `r1` DISABLE KEYS */;
INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16);
/*!40000 ALTER TABLE `r1` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
SET GLOBAL binlog_format=statement;
SET GLOBAL binlog_format=row;
drop table r1;
......
......@@ -2,7 +2,55 @@ DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS VAR_POP;
DROP TABLE IF EXISTS TEMP0;
DROP TABLE IF EXISTS VAR_SAMP;
DROP TABLE IF EXISTS ti;
DROP TABLE IF EXISTS members;
DROP TABLE IF EXISTS members_2;
DROP TABLE IF EXISTS employees;
DROP TABLE IF EXISTS employees_2;
DROP TABLE IF EXISTS employees_3;
DROP TABLE IF EXISTS quarterly_report_status;
DROP TABLE IF EXISTS employees_4;
DROP TABLE IF EXISTS h2;
DROP TABLE IF EXISTS rcx;
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS rc1;
DROP TABLE IF EXISTS rx;
DROP TABLE IF EXISTS rc2;
DROP TABLE IF EXISTS rc3;
DROP TABLE IF EXISTS rc4;
DROP TABLE IF EXISTS employees_by_lname;
DROP TABLE IF EXISTS customers_1;
DROP TABLE IF EXISTS customers_2;
DROP TABLE IF EXISTS customers_3;
DROP TABLE IF EXISTS employees_hash;
DROP TABLE IF EXISTS employees_hash_1;
DROP TABLE IF EXISTS t1_hash;
DROP TABLE IF EXISTS employees_linear_hash;
DROP TABLE IF EXISTS t1_linear_hash;
DROP TABLE IF EXISTS k1;
DROP TABLE IF EXISTS k2;
DROP TABLE IF EXISTS tm1;
DROP TABLE IF EXISTS tk;
DROP TABLE IF EXISTS ts;
DROP TABLE IF EXISTS ts_1;
DROP TABLE IF EXISTS ts_3;
DROP TABLE IF EXISTS ts_4;
DROP TABLE IF EXISTS ts_5;
DROP TABLE IF EXISTS trb3;
DROP TABLE IF EXISTS tr;
DROP TABLE IF EXISTS members_3;
DROP TABLE IF EXISTS clients;
DROP TABLE IF EXISTS clients_lk;
DROP TABLE IF EXISTS trb1;
CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITION BY KEY(i) PARTITIONS 4;
Table Op Msg_type Msg_text
test.t1 optimize status OK
Table Op Msg_type Msg_text
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 repair status OK
Table Op Msg_type Msg_text
test.t1 check status OK
SHOW TABLES;
Tables_in_test
TEMP0
......@@ -24,7 +72,614 @@ i j k
SELECT COUNT(*) FROM t1;
COUNT(*)
1000
DROP TABLE t1;
DROP TABLE VAR_POP;
DROP TABLE TEMP0;
DROP TABLE VAR_SAMP;
CREATE TABLE ti(
id INT,
amount DECIMAL(7,2),
tr_date DATE
) ENGINE=ROCKSDB
PARTITION BY HASH(MONTH(tr_date))
PARTITIONS 6;
CREATE TABLE members (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
username VARCHAR(16) NOT NULL,
email VARCHAR(35),
joined DATE NOT NULL
) ENGINE=ROCKSDB
PARTITION BY KEY(joined)
PARTITIONS 6;
CREATE TABLE members_2 (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
username VARCHAR(16) NOT NULL,
email VARCHAR(35),
joined DATE NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE(YEAR(joined)) (
PARTITION p0 VALUES LESS THAN (1960),
PARTITION p1 VALUES LESS THAN (1970),
PARTITION p2 VALUES LESS THAN (1980),
PARTITION p3 VALUES LESS THAN (1990),
PARTITION p4 VALUES LESS THAN MAXVALUE
);
CREATE TABLE t2 (val INT)
ENGINE=ROCKSDB
PARTITION BY LIST(val)(
PARTITION mypart VALUES IN (1,3,5),
PARTITION MyPart VALUES IN (2,4,6)
);
ERROR HY000: Duplicate partition name MyPart
CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE (store_id) (
PARTITION p0 VALUES LESS THAN (6),
PARTITION p1 VALUES LESS THAN (11),
PARTITION p2 VALUES LESS THAN (16),
PARTITION p3 VALUES LESS THAN MAXVALUE
);
CREATE TABLE employees_2 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE (job_code) (
PARTITION p0 VALUES LESS THAN (100),
PARTITION p1 VALUES LESS THAN (1000),
PARTITION p2 VALUES LESS THAN (10000)
);
CREATE TABLE employees_3 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY RANGE (YEAR(separated)) (
PARTITION p0 VALUES LESS THAN (1991),
PARTITION p1 VALUES LESS THAN (1996),
PARTITION p2 VALUES LESS THAN (2001),
PARTITION p3 VALUES LESS THAN MAXVALUE
);
CREATE TABLE quarterly_report_status (
report_id INT NOT NULL,
report_status VARCHAR(20) NOT NULL,
report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
) ENGINE=ROCKSDB
PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) (
PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ),
PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ),
PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ),
PARTITION p3 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-10-01 00:00:00') ),
PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ),
PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ),
PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ),
PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ),
PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ),
PARTITION p9 VALUES LESS THAN (MAXVALUE)
);
CREATE TABLE employees_4 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY LIST(store_id) (
PARTITION pNorth VALUES IN (3,5,6,9,17),
PARTITION pEast VALUES IN (1,2,10,11,19,20),
PARTITION pWest VALUES IN (4,12,13,14,18),
PARTITION pCentral VALUES IN (7,8,15,16)
);
CREATE TABLE h2 (
c1 INT,
c2 INT
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION p0 VALUES IN (1, 4, 7),
PARTITION p1 VALUES IN (2, 5, 8)
);
INSERT INTO h2 VALUES (3, 5);
ERROR HY000: Table has no partition for value 3
CREATE TABLE rcx (
a INT,
b INT,
c CHAR(3),
d INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,d,c) (
PARTITION p0 VALUES LESS THAN (5,10,'ggg'),
PARTITION p1 VALUES LESS THAN (10,20,'mmm'),
PARTITION p2 VALUES LESS THAN (15,30,'sss'),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
CREATE TABLE r1 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (5),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO r1 VALUES (5,10), (5,11), (5,12);
CREATE TABLE rc1 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a, b) (
PARTITION p0 VALUES LESS THAN (5, 12),
PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE)
);
INSERT INTO rc1 VALUES (5,10), (5,11), (5,12);
SELECT (5,10) < (5,12), (5,11) < (5,12), (5,12) < (5,12);
(5,10) < (5,12) (5,11) < (5,12) (5,12) < (5,12)
1 1 0
CREATE TABLE rx (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS (a) (
PARTITION p0 VALUES LESS THAN (5),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO rx VALUES (5,10), (5,11), (5,12);
CREATE TABLE rc2 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (0,10),
PARTITION p1 VALUES LESS THAN (10,20),
PARTITION p2 VALUES LESS THAN (10,30),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
CREATE TABLE rc3 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (0,10),
PARTITION p1 VALUES LESS THAN (10,20),
PARTITION p2 VALUES LESS THAN (10,30),
PARTITION p3 VALUES LESS THAN (10,35),
PARTITION p4 VALUES LESS THAN (20,40),
PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
CREATE TABLE rc4 (
a INT,
b INT,
c INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b,c) (
PARTITION p0 VALUES LESS THAN (0,25,50),
PARTITION p1 VALUES LESS THAN (10,20,100),
PARTITION p2 VALUES LESS THAN (10,30,50),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50);
(0,25,50) < (10,20,100) (10,20,100) < (10,30,50)
1 1
CREATE TABLE rcf (
a INT,
b INT,
c INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b,c) (
PARTITION p0 VALUES LESS THAN (0,25,50),
PARTITION p1 VALUES LESS THAN (20,20,100),
PARTITION p2 VALUES LESS THAN (10,30,50),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
CREATE TABLE employees_by_lname (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS (lname) (
PARTITION p0 VALUES LESS THAN ('g'),
PARTITION p1 VALUES LESS THAN ('m'),
PARTITION p2 VALUES LESS THAN ('t'),
PARTITION p3 VALUES LESS THAN (MAXVALUE)
);
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) (
PARTITION p0 VALUES LESS THAN ('g'),
PARTITION p1 VALUES LESS THAN ('m'),
PARTITION p2 VALUES LESS THAN ('t'),
PARTITION p3 VALUES LESS THAN (MAXVALUE)
);
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) (
PARTITION p0 VALUES LESS THAN ('1970-01-01'),
PARTITION p1 VALUES LESS THAN ('1980-01-01'),
PARTITION p2 VALUES LESS THAN ('1990-01-01'),
PARTITION p3 VALUES LESS THAN ('2000-01-01'),
PARTITION p4 VALUES LESS THAN ('2010-01-01'),
PARTITION p5 VALUES LESS THAN (MAXVALUE)
);
CREATE TABLE customers_1 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY LIST COLUMNS(city) (
PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'Mönsterås'),
PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'),
PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'),
PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo')
);
CREATE TABLE customers_2 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY LIST COLUMNS(renewal) (
PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03',
'2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'),
PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10',
'2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'),
PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17',
'2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'),
PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24',
'2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28')
);
CREATE TABLE customers_3 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(renewal) (
PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'),
PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'),
PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'),
PARTITION pWeek_4 VALUES LESS THAN('2010-03-01')
);
CREATE TABLE employees_hash (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY HASH(store_id)
PARTITIONS 4;
CREATE TABLE employees_hash_1 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY HASH( YEAR(hired) )
PARTITIONS 4;
CREATE TABLE t1_hash (
col1 INT,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY HASH( YEAR(col3) )
PARTITIONS 4;
CREATE TABLE employees_linear_hash (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;
CREATE TABLE t1_linear_hash (
col1 INT,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR HASH( YEAR(col3) )
PARTITIONS 6;
CREATE TABLE k1 (
id INT NOT NULL PRIMARY KEY,
name VARCHAR(20)
) ENGINE=ROCKSDB
PARTITION BY KEY()
PARTITIONS 2;
CREATE TABLE k2 (
id INT NOT NULL,
name VARCHAR(20),
UNIQUE KEY (id)
) ENGINE=ROCKSDB
PARTITION BY KEY()
PARTITIONS 2;
CREATE TABLE tm1 (
s1 CHAR(32) PRIMARY KEY
) ENGINE=ROCKSDB
PARTITION BY KEY(s1)
PARTITIONS 10;
CREATE TABLE tk (
col1 INT NOT NULL,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR KEY (col1)
PARTITIONS 3;
CREATE TABLE ts (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) )
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (2000),
PARTITION p2 VALUES LESS THAN MAXVALUE
);
CREATE TABLE ts_1 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_2 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2,
SUBPARTITION s3
)
);
ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2,
SUBPARTITION s3
)
)' at line 11
CREATE TABLE ts_3 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_4 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_5 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE(YEAR(purchased))
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0a,
SUBPARTITION s0b
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s1a,
SUBPARTITION s1b
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2a,
SUBPARTITION s2b
)
);
CREATE TABLE trb3 (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);
ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2;
CREATE TABLE tr (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);
INSERT INTO tr VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'aquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
id name purchased
3 TV set 1996-03-10
10 lava lamp 1998-12-25
ALTER TABLE tr DROP PARTITION p2;
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
id name purchased
CREATE TABLE members_3 (
id INT,
fname VARCHAR(25),
lname VARCHAR(25),
dob DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(dob) ) (
PARTITION p0 VALUES LESS THAN (1970),
PARTITION p1 VALUES LESS THAN (1980),
PARTITION p2 VALUES LESS THAN (1990)
);
ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000));
ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960));
ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition
CREATE TABLE clients (
id INT,
fname VARCHAR(30),
lname VARCHAR(30),
signed DATE
) ENGINE=ROCKSDB
PARTITION BY HASH( MONTH(signed) )
PARTITIONS 12;
ALTER TABLE clients COALESCE PARTITION 4;
CREATE TABLE clients_lk (
id INT,
fname VARCHAR(30),
lname VARCHAR(30),
signed DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR KEY(signed)
PARTITIONS 12;
ALTER TABLE clients COALESCE PARTITION 18;
ERROR HY000: Cannot remove all partitions, use DROP TABLE instead
ALTER TABLE clients ADD PARTITION PARTITIONS 6;
CREATE TABLE trb1 (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE(id) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (7),
PARTITION p2 VALUES LESS THAN (9),
PARTITION p3 VALUES LESS THAN (11)
);
INSERT INTO trb1 VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'aquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
ALTER TABLE trb1 ADD PRIMARY KEY (id);
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS VAR_POP;
DROP TABLE IF EXISTS TEMP0;
DROP TABLE IF EXISTS VAR_SAMP;
DROP TABLE IF EXISTS ti;
DROP TABLE IF EXISTS members;
DROP TABLE IF EXISTS members_2;
DROP TABLE IF EXISTS employees;
DROP TABLE IF EXISTS employees_2;
DROP TABLE IF EXISTS employees_3;
DROP TABLE IF EXISTS quarterly_report_status;
DROP TABLE IF EXISTS employees_4;
DROP TABLE IF EXISTS h2;
DROP TABLE IF EXISTS rcx;
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS rc1;
DROP TABLE IF EXISTS rx;
DROP TABLE IF EXISTS rc2;
DROP TABLE IF EXISTS rc3;
DROP TABLE IF EXISTS rc4;
DROP TABLE IF EXISTS employees_by_lname;
DROP TABLE IF EXISTS customers_1;
DROP TABLE IF EXISTS customers_2;
DROP TABLE IF EXISTS customers_3;
DROP TABLE IF EXISTS employees_hash;
DROP TABLE IF EXISTS employees_hash_1;
DROP TABLE IF EXISTS t1_hash;
DROP TABLE IF EXISTS employees_linear_hash;
DROP TABLE IF EXISTS t1_linear_hash;
DROP TABLE IF EXISTS k1;
DROP TABLE IF EXISTS k2;
DROP TABLE IF EXISTS tm1;
DROP TABLE IF EXISTS tk;
DROP TABLE IF EXISTS ts;
DROP TABLE IF EXISTS ts_1;
DROP TABLE IF EXISTS ts_3;
DROP TABLE IF EXISTS ts_4;
DROP TABLE IF EXISTS ts_5;
DROP TABLE IF EXISTS trb3;
DROP TABLE IF EXISTS tr;
DROP TABLE IF EXISTS members_3;
DROP TABLE IF EXISTS clients;
DROP TABLE IF EXISTS clients_lk;
DROP TABLE IF EXISTS trb1;
......@@ -864,6 +864,7 @@ rocksdb_allow_mmap_reads OFF
rocksdb_allow_mmap_writes OFF
rocksdb_background_sync OFF
rocksdb_base_background_compactions 1
rocksdb_blind_delete_primary_key OFF
rocksdb_block_cache_size 536870912
rocksdb_block_restart_interval 16
rocksdb_block_size 4096
......@@ -889,14 +890,16 @@ rocksdb_db_write_buffer_size 0
rocksdb_deadlock_detect OFF
rocksdb_debug_optimizer_no_zero_cardinality ON
rocksdb_default_cf_options
rocksdb_delayed_write_rate 16777216
rocksdb_delete_obsolete_files_period_micros 21600000000
rocksdb_disabledatasync OFF
rocksdb_enable_2pc ON
rocksdb_enable_bulk_load_api ON
rocksdb_enable_thread_tracking OFF
rocksdb_enable_write_thread_adaptive_yield OFF
rocksdb_error_if_exists OFF
rocksdb_flush_log_at_trx_commit 1
rocksdb_flush_memtable_on_analyze ON
rocksdb_force_compute_memtable_stats ON
rocksdb_force_flush_memtable_now OFF
rocksdb_force_index_records_in_range 0
rocksdb_hash_index_allow_collision ON
......@@ -908,6 +911,7 @@ rocksdb_lock_scanned_rows OFF
rocksdb_lock_wait_timeout 1
rocksdb_log_file_time_to_roll 0
rocksdb_manifest_preallocation_size 4194304
rocksdb_master_skip_tx_api OFF
rocksdb_max_background_compactions 1
rocksdb_max_background_flushes 1
rocksdb_max_log_file_size 0
......@@ -925,7 +929,7 @@ rocksdb_paranoid_checks ON
rocksdb_pause_background_work ON
rocksdb_perf_context_level 0
rocksdb_persistent_cache_path
rocksdb_persistent_cache_size 0
rocksdb_persistent_cache_size_mb 0
rocksdb_pin_l0_filter_and_index_blocks_in_cache ON
rocksdb_print_snapshot_conflict_queries OFF
rocksdb_rate_limiter_bytes_per_sec 0
......@@ -953,25 +957,37 @@ rocksdb_validate_tables 1
rocksdb_verify_row_debug_checksums OFF
rocksdb_wal_bytes_per_sync 0
rocksdb_wal_dir
rocksdb_wal_recovery_mode 2
rocksdb_wal_recovery_mode 1
rocksdb_wal_size_limit_mb 0
rocksdb_wal_ttl_seconds 0
rocksdb_whole_key_filtering ON
rocksdb_write_disable_wal OFF
rocksdb_write_ignore_missing_column_families OFF
rocksdb_write_sync OFF
create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
insert into t47 values (1, 'row1');
insert into t47 values (2, 'row2');
set rocksdb_bulk_load=1;
insert into t47 values (3, 'row3'),(4, 'row4');
set rocksdb_bulk_load=0;
connect con1,localhost,root,,;
set rocksdb_bulk_load=1;
insert into t47 values (10, 'row10'),(11, 'row11');
connection default;
set rocksdb_bulk_load=1;
insert into t47 values (100, 'row100'),(101, 'row101');
disconnect con1;
connection default;
set rocksdb_bulk_load=0;
select * from t47;
pk col1
1 row1
2 row2
3 row3
4 row4
10 row10
11 row11
100 row100
101 row101
drop table t47;
#
# Fix TRUNCATE over empty table (transaction is committed when it wasn't
......@@ -1410,6 +1426,7 @@ rocksdb_rows_deleted #
rocksdb_rows_inserted #
rocksdb_rows_read #
rocksdb_rows_updated #
rocksdb_rows_deleted_blind #
rocksdb_system_rows_deleted #
rocksdb_system_rows_inserted #
rocksdb_system_rows_read #
......@@ -1482,6 +1499,7 @@ ROCKSDB_ROWS_DELETED
ROCKSDB_ROWS_INSERTED
ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
......@@ -1556,6 +1574,7 @@ ROCKSDB_ROWS_DELETED
ROCKSDB_ROWS_INSERTED
ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
......@@ -1737,7 +1756,7 @@ key1 int,
PRIMARY KEY (id),
index (key1) comment 'test.t1.key1'
) engine=rocksdb;
ERROR HY000: Column Family Flag is different from existing flag. Assign a new CF flag, or do not change existing CF flag.
ERROR HY000: Column family ('test.t1.key1') flag (0) is different from an existing flag (2). Assign a new CF flag, or do not change existing CF flag.
create table t1_err (
id int not null,
key1 int,
......@@ -1763,7 +1782,7 @@ key1 int,
PRIMARY KEY (id),
index (key1) comment '$per_idnex_cf'
)engine=rocksdb;
ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf'
ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf.'
#
# Issue #22: SELECT ... FOR UPDATE takes a long time
#
......
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment';
cf_name
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment';
cf_name
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
set @@global.rocksdb_compact_cf = 'foo';
set @@global.rocksdb_compact_cf = 'my_custom_cf';
set @@global.rocksdb_compact_cf = 'baz';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo';
cf_name
foo
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf';
cf_name
my_custom_cf
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz';
cf_name
baz
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
set @@global.rocksdb_compact_cf = 't1-p0';
set @@global.rocksdb_compact_cf = 'rev:bar';
set @@global.rocksdb_compact_cf = 't1-p2';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0';
cf_name
t1-p0
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar';
cf_name
rev:bar
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2';
cf_name
t1-p2
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9),
PARTITION custom_p3 VALUES IN (10, 20, 30)
);
set @@global.rocksdb_compact_cf = 'cf-zero';
set @@global.rocksdb_compact_cf = 'cf-one';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero';
cf_name
cf-zero
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one';
cf_name
cf-one
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
INSERT INTO t1 VALUES (1, 1, "one", null);
INSERT INTO t1 VALUES (2, 2, "two", null);
INSERT INTO t1 VALUES (3, 3, "three", null);
INSERT INTO t1 VALUES (5, 5, "five", null);
INSERT INTO t1 VALUES (9, 9, "nine", null);
SELECT * FROM t1;
c1 c2 name event
1 1 one NULL
2 2 two NULL
5 5 five NULL
3 3 three NULL
9 9 nine NULL
ALTER TABLE t1 DROP PRIMARY KEY;
SELECT * FROM t1;
c1 c2 name event
1 1 one NULL
2 2 two NULL
5 5 five NULL
3 3 three NULL
9 9 nine NULL
set @@global.rocksdb_compact_cf = 'foo';
set @@global.rocksdb_compact_cf = 'bar';
set @@global.rocksdb_compact_cf = 'baz';
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
INSERT INTO t1 VALUES (1, 1, "one", null);
INSERT INTO t1 VALUES (2, 2, "two", null);
INSERT INTO t1 VALUES (3, 3, "three", null);
INSERT INTO t1 VALUES (5, 5, "five", null);
INSERT INTO t1 VALUES (9, 9, "nine", null);
ALTER TABLE t1 DROP PRIMARY KEY;
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf';
set @@global.rocksdb_compact_cf = 'p0_cf';
set @@global.rocksdb_compact_cf = 'p1_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf';
cf_name
p0_cf
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf';
cf_name
p1_cf
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
INSERT INTO t1 VALUES (1, 1, "one", null);
INSERT INTO t1 VALUES (2, 2, "two", null);
INSERT INTO t1 VALUES (3, 3, "three", null);
INSERT INTO t1 VALUES (5, 5, "five", null);
INSERT INTO t1 VALUES (9, 9, "nine", null);
ALTER TABLE t1 PARTITION BY LIST(c1) (
PARTITION custom_p3 VALUES IN (1, 4, 7),
PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9)
);
ALTER TABLE t1 DROP PRIMARY KEY;
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf';
set @@global.rocksdb_compact_cf = 'p3_cf';
set @@global.rocksdb_compact_cf = 'p4_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf';
cf_name
p3_cf
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf';
cf_name
p4_cf
DROP TABLE t1;
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
DROP TABLE t1;
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
DROP TABLE t2;
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1'
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
set @@global.rocksdb_compact_cf = 'my_cf0';
set @@global.rocksdb_compact_cf = 'my_cf1';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0';
cf_name
my_cf0
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1';
cf_name
my_cf1
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
col1 HEX(col2) HEX(col3) col4 HEX(col5)
100 012345 01 1 02
200 012345 01 1 02
300 012345 01 1 02
100 023456 02 1 03
100 034567 04 1 05
400 089ABC 04 1 05
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 3 Using where; Using index
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index
ALTER TABLE t2 DROP PRIMARY KEY;
ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1';
set @@global.rocksdb_compact_cf = 'new_cf0';
set @@global.rocksdb_compact_cf = 'new_cf1';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0';
cf_name
new_cf0
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1';
cf_name
new_cf1
INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2);
INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3);
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 custom_p0 index NULL PRIMARY 332 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 custom_p1 index NULL PRIMARY 332 NULL 2 Using where; Using index
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
col1 HEX(col2) HEX(col3) col4 HEX(col5)
100 012345 01 1 02
200 012345 01 1 02
300 012345 01 1 02
500 012345 05 1 02
100 023456 02 1 03
700 023456 07 1 03
100 034567 04 1 05
400 089ABC 04 1 05
DROP TABLE t2;
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5'
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0';
cf_name
test_cf0
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1';
cf_name
test_cf1
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5';
cf_name
test_cf5
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 custom_p5 ref col2 col2 74 const,const 1 Using where
ALTER TABLE t2 DROP KEY `col2`;
ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5';
cf_name
another_cf_for_p5
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 custom_p2 ref col3 col3 258 const 1 Using where
DROP TABLE t2;
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5'
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5';
cf_name
unique_test_cf5
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2'
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
ERROR 23000: Duplicate entry '\x01#E-1' for key 'col2'
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
DROP TABLE t2;
CREATE TABLE t1 (
`a` int,
PRIMARY KEY (a) COMMENT "sharedcf"
) ENGINE=ROCKSDB;
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf';
cf_name
sharedcf
CREATE TABLE t2 (
`a` INT,
`b` DATE,
`c` VARCHAR(42),
PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf"
) ENGINE=ROCKSDB
PARTITION BY LIST(`a`) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf';
cf_name
notsharedcf
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
......@@ -12,7 +12,6 @@ Type Name Status
DBSTATS rocksdb #
CF_COMPACTION __system__ #
CF_COMPACTION cf_t1 #
CF_COMPACTION cf_t4 #
CF_COMPACTION default #
CF_COMPACTION rev:cf_t2 #
Memory_Stats rocksdb #
......@@ -48,15 +47,6 @@ cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE #
cf_t1 NUM_ENTRIES_IMM_MEM_TABLES #
cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE #
cf_t1 NUM_LIVE_VERSIONS #
cf_t4 NUM_IMMUTABLE_MEM_TABLE #
cf_t4 MEM_TABLE_FLUSH_PENDING #
cf_t4 COMPACTION_PENDING #
cf_t4 CUR_SIZE_ACTIVE_MEM_TABLE #
cf_t4 CUR_SIZE_ALL_MEM_TABLES #
cf_t4 NUM_ENTRIES_ACTIVE_MEM_TABLE #
cf_t4 NUM_ENTRIES_IMM_MEM_TABLES #
cf_t4 NON_BLOCK_CACHE_SST_MEM_USAGE #
cf_t4 NUM_LIVE_VERSIONS #
default NUM_IMMUTABLE_MEM_TABLE #
default MEM_TABLE_FLUSH_PENDING #
default COMPACTION_PENDING #
......@@ -117,7 +107,6 @@ __system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS #
__system__ ARENA_BLOCK_SIZE #
__system__ DISABLE_AUTO_COMPACTIONS #
__system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH #
__system__ VERIFY_CHECKSUM_IN_COMPACTION #
__system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
__system__ MEMTABLE_FACTORY #
__system__ INPLACE_UPDATE_SUPPORT #
......@@ -126,7 +115,6 @@ __system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
__system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
__system__ BLOOM_LOCALITY #
__system__ MAX_SUCCESSIVE_MERGES #
__system__ MIN_PARTIAL_MERGE_OPERANDS #
__system__ OPTIMIZE_FILTERS_FOR_HITS #
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
__system__ COMPRESSION_TYPE #
......@@ -173,7 +161,6 @@ cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
cf_t1 ARENA_BLOCK_SIZE #
cf_t1 DISABLE_AUTO_COMPACTIONS #
cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
cf_t1 VERIFY_CHECKSUM_IN_COMPACTION #
cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
cf_t1 MEMTABLE_FACTORY #
cf_t1 INPLACE_UPDATE_SUPPORT #
......@@ -182,7 +169,6 @@ cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
cf_t1 BLOOM_LOCALITY #
cf_t1 MAX_SUCCESSIVE_MERGES #
cf_t1 MIN_PARTIAL_MERGE_OPERANDS #
cf_t1 OPTIMIZE_FILTERS_FOR_HITS #
cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
cf_t1 COMPRESSION_TYPE #
......@@ -206,62 +192,6 @@ cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
cf_t4 COMPARATOR #
cf_t4 MERGE_OPERATOR #
cf_t4 COMPACTION_FILTER #
cf_t4 COMPACTION_FILTER_FACTORY #
cf_t4 WRITE_BUFFER_SIZE #
cf_t4 MAX_WRITE_BUFFER_NUMBER #
cf_t4 MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
cf_t4 NUM_LEVELS #
cf_t4 LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
cf_t4 LEVEL0_SLOWDOWN_WRITES_TRIGGER #
cf_t4 LEVEL0_STOP_WRITES_TRIGGER #
cf_t4 MAX_MEM_COMPACTION_LEVEL #
cf_t4 TARGET_FILE_SIZE_BASE #
cf_t4 TARGET_FILE_SIZE_MULTIPLIER #
cf_t4 MAX_BYTES_FOR_LEVEL_BASE #
cf_t4 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER #
cf_t4 SOFT_RATE_LIMIT #
cf_t4 HARD_RATE_LIMIT #
cf_t4 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
cf_t4 ARENA_BLOCK_SIZE #
cf_t4 DISABLE_AUTO_COMPACTIONS #
cf_t4 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
cf_t4 VERIFY_CHECKSUM_IN_COMPACTION #
cf_t4 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
cf_t4 MEMTABLE_FACTORY #
cf_t4 INPLACE_UPDATE_SUPPORT #
cf_t4 INPLACE_UPDATE_NUM_LOCKS #
cf_t4 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
cf_t4 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
cf_t4 BLOOM_LOCALITY #
cf_t4 MAX_SUCCESSIVE_MERGES #
cf_t4 MIN_PARTIAL_MERGE_OPERANDS #
cf_t4 OPTIMIZE_FILTERS_FOR_HITS #
cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
cf_t4 COMPRESSION_TYPE #
cf_t4 COMPRESSION_PER_LEVEL #
cf_t4 COMPRESSION_OPTS #
cf_t4 BOTTOMMOST_COMPRESSION #
cf_t4 PREFIX_EXTRACTOR #
cf_t4 COMPACTION_STYLE #
cf_t4 COMPACTION_OPTIONS_UNIVERSAL #
cf_t4 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
cf_t4 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
cf_t4 BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
cf_t4 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
cf_t4 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
cf_t4 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
default COMPARATOR #
default MERGE_OPERATOR #
default COMPACTION_FILTER #
......@@ -285,7 +215,6 @@ default RATE_LIMIT_DELAY_MAX_MILLISECONDS #
default ARENA_BLOCK_SIZE #
default DISABLE_AUTO_COMPACTIONS #
default PURGE_REDUNDANT_KVS_WHILE_FLUSH #
default VERIFY_CHECKSUM_IN_COMPACTION #
default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
default MEMTABLE_FACTORY #
default INPLACE_UPDATE_SUPPORT #
......@@ -294,7 +223,6 @@ default MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
default BLOOM_LOCALITY #
default MAX_SUCCESSIVE_MERGES #
default MIN_PARTIAL_MERGE_OPERANDS #
default OPTIMIZE_FILTERS_FOR_HITS #
default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
default COMPRESSION_TYPE #
......@@ -341,7 +269,6 @@ rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
rev:cf_t2 ARENA_BLOCK_SIZE #
rev:cf_t2 DISABLE_AUTO_COMPACTIONS #
rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
rev:cf_t2 VERIFY_CHECKSUM_IN_COMPACTION #
rev:cf_t2 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
rev:cf_t2 MEMTABLE_FACTORY #
rev:cf_t2 INPLACE_UPDATE_SUPPORT #
......@@ -350,7 +277,6 @@ rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
rev:cf_t2 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
rev:cf_t2 BLOOM_LOCALITY #
rev:cf_t2 MAX_SUCCESSIVE_MERGES #
rev:cf_t2 MIN_PARTIAL_MERGE_OPERANDS #
rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS #
rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
rev:cf_t2 COMPRESSION_TYPE #
......
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '<DATA_DIR>' INDEX DIRECTORY = '<INDEX_DIR>';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` char(8) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
ALTER TABLE t1 INDEX DIRECTORY = '<DATA_DIR>';
Warnings:
Warning 1618 <INDEX DIRECTORY> option ignored
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` char(8) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data';
ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index';
ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id)
(
PARTITION P0 VALUES LESS THAN (1000)
DATA DIRECTORY = '/foo/bar/data/',
PARTITION P1 VALUES LESS THAN (2000)
DATA DIRECTORY = '/foo/bar/data/',
PARTITION P2 VALUES LESS THAN (MAXVALUE)
);
ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id)
(
PARTITION P0 VALUES LESS THAN (1000)
INDEX DIRECTORY = '/foo/bar/data/',
PARTITION P1 VALUES LESS THAN (2000)
INDEX DIRECTORY = '/foo/bar/data/',
PARTITION P2 VALUES LESS THAN (MAXVALUE)
);
ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
......@@ -26,23 +26,23 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 1 1
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
1 1 1
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
1 1 1
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 1 1
2 2 2
......@@ -54,7 +54,7 @@ id value value2
9 9 9
10 10 10
11 11 11
select value from t1;
select value from t1 order by id;
value
1
2
......@@ -93,23 +93,23 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 1 1
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
1 1 1
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
1 1 1
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 1 1
2 2 2
......@@ -121,7 +121,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
1
2
......@@ -159,22 +159,22 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 1 100
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
1 1 100
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 1 100
2 2 2
......@@ -185,7 +185,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
1
2
......@@ -221,21 +221,21 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 100 1
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
1 100 1
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 100 1
2 2 2
......@@ -246,8 +246,9 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
100
2
3
4
......@@ -256,7 +257,6 @@ value
8
9
10
100
rollback;
begin;
update t1 set id=100 where id=1;
......@@ -283,22 +283,22 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
100 1 1
2 2 2
select value from t1 where value < 3;
100 1 1
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
1
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
100 1 1
select * from t1;
select * from t1 order by id;
id value value2
2 2 2
3 3 3
......@@ -309,9 +309,8 @@ id value value2
9 9 9
10 10 10
100 1 1
select value from t1;
select value from t1 order by id;
value
1
2
3
4
......@@ -320,6 +319,7 @@ value
8
9
10
1
rollback;
begin;
update t1 set value2=100 where value=1;
......@@ -346,22 +346,22 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 1 100
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
1 1 100
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 1 100
2 2 2
......@@ -372,7 +372,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
1
2
......@@ -408,21 +408,21 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 100 1
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
1 100 1
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 100 1
2 2 2
......@@ -433,8 +433,9 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
100
2
3
4
......@@ -443,7 +444,6 @@ value
8
9
10
100
rollback;
begin;
update t1 set id=100 where value=1;
......@@ -470,22 +470,22 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
100 1 1
2 2 2
select value from t1 where value < 3;
100 1 1
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
1
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
100 1 1
select * from t1;
select * from t1 order by id;
id value value2
2 2 2
3 3 3
......@@ -496,9 +496,8 @@ id value value2
9 9 9
10 10 10
100 1 1
select value from t1;
select value from t1 order by id;
value
1
2
3
4
......@@ -507,6 +506,7 @@ value
8
9
10
1
rollback;
begin;
update t1 set value2=100 where value2=1;
......@@ -533,22 +533,22 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 1 100
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
1 1 100
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 1 100
2 2 2
......@@ -559,7 +559,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
1
2
......@@ -595,21 +595,21 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 100 1
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
1 100 1
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 100 1
2 2 2
......@@ -620,8 +620,9 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
100
2
3
4
......@@ -630,7 +631,6 @@ value
8
9
10
100
rollback;
begin;
update t1 set id=100 where value2=1;
......@@ -657,22 +657,22 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
100 1 1
2 2 2
select value from t1 where value < 3;
100 1 1
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
1
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
100 1 1
select * from t1;
select * from t1 order by id;
id value value2
2 2 2
3 3 3
......@@ -683,9 +683,8 @@ id value value2
9 9 9
10 10 10
100 1 1
select value from t1;
select value from t1 order by id;
value
1
2
3
4
......@@ -694,6 +693,7 @@ value
8
9
10
1
rollback;
begin;
delete from t1 where id=1;
......@@ -717,19 +717,19 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
2 2 2
3 3 3
......@@ -739,7 +739,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
2
3
......@@ -772,19 +772,19 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
2 2 2
3 3 3
......@@ -794,7 +794,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
2
3
......@@ -827,19 +827,19 @@ value
select * from t1 where value2=5;
id value value2
5 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
2 2 2
3 3 3
......@@ -849,7 +849,7 @@ id value value2
8 8 8
9 9 9
10 10 10
select value from t1;
select value from t1 order by id;
value
2
3
......@@ -892,23 +892,23 @@ value
select * from t1 where value2=5;
id value value2
100 5 5
select * from t1 where id < 3;
select * from t1 where id < 3 order by id;
id value value2
1 1 1
2 2 2
select * from t1 where value < 3;
select * from t1 where value < 3 order by id;
id value value2
1 1 1
2 2 2
select value from t1 where value < 3;
select value from t1 where value < 3 order by id;
value
1
2
select * from t1 where value2 < 3;
select * from t1 where value2 < 3 order by id;
id value value2
1 1 1
2 2 2
select * from t1;
select * from t1 order by id;
id value value2
1 1 1
2 2 2
......@@ -920,17 +920,17 @@ id value value2
13 13 13
100 5 5
115 3 3
select value from t1;
select value from t1 order by id;
value
1
2
3
5
103
6
10
11
12
13
103
5
3
rollback;
drop table t1;
......@@ -70,3 +70,15 @@ id id2 value
1 1 1
set debug_sync='RESET';
drop table t1, t2;
drop table if exists t1,t2,t3;
create table t1 (id int, value int, primary key (id)) engine=rocksdb;
create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
create table t3 (id int, value int) engine=rocksdb;
SET @old_val = @@session.unique_checks;
set @@session.unique_checks = FALSE;
insert into t1 values (1, 1), (1, 2);
insert into t2 values (1, 1, 1), (1, 2, 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
insert into t3 values (1, 1), (1, 1);
set @@session.unique_checks = @old_val;
drop table t1, t2, t3;
SET GLOBAL rocksdb_write_disable_wal=false;
SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
create table aaa (id int primary key, i int) engine rocksdb;
SET LOCAL rocksdb_write_sync=off;
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(1,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
......@@ -15,7 +15,7 @@ insert aaa(id, i) values(3,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
variable_value-@a
0
SET LOCAL rocksdb_write_sync=1;
SET LOCAL rocksdb_flush_log_at_trx_commit=1;
insert aaa(id, i) values(4,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
variable_value-@a
......@@ -29,11 +29,11 @@ select variable_value-@a from information_schema.global_status where variable_na
variable_value-@a
3
SET GLOBAL rocksdb_background_sync=on;
SET LOCAL rocksdb_write_sync=off;
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
insert aaa(id, i) values(7,1);
truncate table aaa;
drop table aaa;
SET GLOBAL rocksdb_write_sync=off;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
SET GLOBAL rocksdb_write_disable_wal=false;
SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
SET GLOBAL rocksdb_background_sync=off;
......@@ -14,7 +14,7 @@ CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=
--echo # 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
--echo ## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
......@@ -29,7 +29,7 @@ select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true'
--echo # 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
......@@ -42,7 +42,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
--echo # 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
......@@ -59,6 +59,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
DROP TABLE t1;
DROP DATABASE mysqlslap;
......@@ -341,5 +341,3 @@ while ($i <= $max) {
#SHOW TABLE STATUS WHERE name LIKE 't1';
DROP TABLE t1;
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Test that fast secondary index creation updates cardinality properly
CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed';
send ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
connect (con1,localhost,root,,);
# Flush memtable out to SST
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
SET debug_sync= 'now SIGNAL flushed';
connection default;
reap;
# Return the data for the primary key of t1
--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
# Return the data for the secondary index of t1
--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj");
disconnect con1;
SET debug_sync='RESET';
# cleanup
DROP TABLE t1;
!include suite/rpl/my.cnf
[mysqld.1]
sync_binlog=0
binlog_format=row
slave-exec-mode=strict
[mysqld.2]
sync_binlog=0
binlog_format=row
slave-exec-mode=strict
--source include/have_rocksdb.inc
source include/master-slave.inc;
connection master;
set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key;
set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api;
--disable_warnings
DROP TABLE IF EXISTS t1,t2;
--enable_warnings
create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb;
create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
--disable_query_log
let $t = 1;
while ($t <= 2) {
let $i = 1;
while ($i <= 10000) {
let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150));
inc $i;
eval $insert;
}
inc $t;
}
--enable_query_log
SET session rocksdb_blind_delete_primary_key=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
# Deleting 1000 rows from t1
--disable_query_log
let $i = 1;
while ($i <= 1000) {
let $insert = DELETE FROM t1 WHERE id=$i;
inc $i;
eval $insert;
}
--enable_query_log
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t1;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
connection master;
# Deleting 1000 rows from t2 (blind delete disabled because of secondary key)
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
--disable_query_log
let $i = 1;
while ($i <= 1000) {
let $insert = DELETE FROM t2 WHERE id=$i;
inc $i;
eval $insert;
}
--enable_query_log
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t2;
SET session rocksdb_master_skip_tx_api=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
--disable_query_log
let $t = 1;
while ($t <= 2) {
let $i = 1001;
while ($i <= 2000) {
let $insert = DELETE FROM t$t WHERE id=$i;
inc $i;
eval $insert;
}
inc $t;
}
--enable_query_log
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
connection master;
# Range Deletes (blind delete disabled)
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000;
DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000;
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
connection master;
# Deleting same keys (slaves stop)
DELETE FROM t1 WHERE id = 10;
SELECT count(*) FROM t1;
connection slave;
call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*");
call mtr.add_suppression("Slave: Can't find record in 't1'.*");
# wait until we have the expected error
--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND)
--source include/wait_for_slave_sql_error.inc
connection slave;
set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables;
set global rocksdb_read_free_rpl_tables="t.*";
START SLAVE;
connection master;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
connection master;
# cleanup
connection slave;
set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables;
connection master;
SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key;
SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api;
DROP TABLE t1, t2;
--source include/rpl_end.inc
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
DROP TABLE IF EXISTS t1, t2, t3;
--enable_warnings
# Create a table with a primary key and one secondary key as well as one
......@@ -25,7 +25,7 @@ CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE
perl;
my $fn = $ENV{'ROCKSDB_INFILE'};
open(my $fh, '>>', $fn) || die "perl open($fn): $!";
my $max = 10000000;
my $max = 5000000;
my @chars = ("A".."Z", "a".."z", "0".."9");
my @lowerchars = ("a".."z");
my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1);
......
......@@ -177,5 +177,8 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro
CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
DROP TABLE abc;
# test bad regex (null caused a crash) - Issue 493
SET GLOBAL rocksdb_strict_collation_exceptions=null;
# cleanup
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
......@@ -37,7 +37,7 @@ SELECT * FROM t1;
SHOW SESSION STATUS LIKE 'Handler_read%';
FLUSH STATUS;
SELECT * FROM t1 WHERE b <=5 ORDER BY b;
SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b;
SHOW SESSION STATUS LIKE 'Handler_read%';
FLUSH STATUS;
......@@ -50,4 +50,3 @@ SHOW SESSION STATUS LIKE 'Handler_read%';
# Cleanup
DROP TABLE t1;
......@@ -35,6 +35,6 @@ insert into linktable (id1, link_type, id2) values (2, 1, 9);
insert into linktable (id1, link_type, id2) values (2, 1, 10);
--replace_column 9 #
explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
drop table linktable;
drop table if exists t;
create table t (
a int,
b int,
c varchar(12249) collate latin1_bin,
d datetime,
e int,
f int,
g blob,
h int,
i int,
key (b,e),
key (h,b)
) engine=rocksdb
partition by linear hash (i) partitions 8 ;
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
select i from t group by h;
select i from t group by h;
drop table t;
......@@ -51,6 +51,9 @@ SET GLOBAL default_storage_engine=rocksdb;
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test
--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l
# Sanity test mysqldump when the --innodb-stats-on-metadata is specified (no effect)
--echo ==== mysqldump with --innodb-stats-on-metadata ====
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test
# wiping general log so that this test case doesn't fail with --repeat
--exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log
......
......@@ -8,6 +8,46 @@ DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS VAR_POP;
DROP TABLE IF EXISTS TEMP0;
DROP TABLE IF EXISTS VAR_SAMP;
DROP TABLE IF EXISTS ti;
DROP TABLE IF EXISTS members;
DROP TABLE IF EXISTS members_2;
DROP TABLE IF EXISTS employees;
DROP TABLE IF EXISTS employees_2;
DROP TABLE IF EXISTS employees_3;
DROP TABLE IF EXISTS quarterly_report_status;
DROP TABLE IF EXISTS employees_4;
DROP TABLE IF EXISTS h2;
DROP TABLE IF EXISTS rcx;
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS rc1;
DROP TABLE IF EXISTS rx;
DROP TABLE IF EXISTS rc2;
DROP TABLE IF EXISTS rc3;
DROP TABLE IF EXISTS rc4;
DROP TABLE IF EXISTS employees_by_lname;
DROP TABLE IF EXISTS customers_1;
DROP TABLE IF EXISTS customers_2;
DROP TABLE IF EXISTS customers_3;
DROP TABLE IF EXISTS employees_hash;
DROP TABLE IF EXISTS employees_hash_1;
DROP TABLE IF EXISTS t1_hash;
DROP TABLE IF EXISTS employees_linear_hash;
DROP TABLE IF EXISTS t1_linear_hash;
DROP TABLE IF EXISTS k1;
DROP TABLE IF EXISTS k2;
DROP TABLE IF EXISTS tm1;
DROP TABLE IF EXISTS tk;
DROP TABLE IF EXISTS ts;
DROP TABLE IF EXISTS ts_1;
DROP TABLE IF EXISTS ts_3;
DROP TABLE IF EXISTS ts_4;
DROP TABLE IF EXISTS ts_5;
DROP TABLE IF EXISTS trb3;
DROP TABLE IF EXISTS tr;
DROP TABLE IF EXISTS members_3;
DROP TABLE IF EXISTS clients;
DROP TABLE IF EXISTS clients_lk;
DROP TABLE IF EXISTS trb1;
--enable_warnings
......@@ -22,6 +62,12 @@ while ($i <= $max) {
eval $insert;
}
ALTER TABLE t1 REBUILD PARTITION p0, p1;
ALTER TABLE t1 OPTIMIZE PARTITION p0, p1;
ALTER TABLE t1 ANALYZE PARTITION p3;
ALTER TABLE t1 REPAIR PARTITION p0,p1;
ALTER TABLE t1 CHECK PARTITION p1;
# Parition string is "#P#". To verify that parsing is done correctly then we'll
# verify if tables containing "P" somwhere can be created correctly.
CREATE TABLE VAR_POP (a int) ENGINE = ROCKSDB;
......@@ -35,8 +81,677 @@ SHOW TABLES;
SELECT * FROM t1 ORDER BY i LIMIT 10;
SELECT COUNT(*) FROM t1;
DROP TABLE t1;
DROP TABLE VAR_POP;
DROP TABLE TEMP0;
DROP TABLE VAR_SAMP;
#
# Test-cases above are copied from
# https://dev.mysql.com/doc/refman/5.6/en/partitioning.html to validate that the
# partitioning related examples work with MyRocks.
#
# Create a table that is partitioned by hash into 6 partitions.
CREATE TABLE ti(
id INT,
amount DECIMAL(7,2),
tr_date DATE
) ENGINE=ROCKSDB
PARTITION BY HASH(MONTH(tr_date))
PARTITIONS 6;
CREATE TABLE members (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
username VARCHAR(16) NOT NULL,
email VARCHAR(35),
joined DATE NOT NULL
) ENGINE=ROCKSDB
PARTITION BY KEY(joined)
PARTITIONS 6;
CREATE TABLE members_2 (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
username VARCHAR(16) NOT NULL,
email VARCHAR(35),
joined DATE NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE(YEAR(joined)) (
PARTITION p0 VALUES LESS THAN (1960),
PARTITION p1 VALUES LESS THAN (1970),
PARTITION p2 VALUES LESS THAN (1980),
PARTITION p3 VALUES LESS THAN (1990),
PARTITION p4 VALUES LESS THAN MAXVALUE
);
# Partition names are not case-sensitive.
--error 1517
CREATE TABLE t2 (val INT)
ENGINE=ROCKSDB
PARTITION BY LIST(val)(
PARTITION mypart VALUES IN (1,3,5),
PARTITION MyPart VALUES IN (2,4,6)
);
CREATE TABLE employees (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE (store_id) (
PARTITION p0 VALUES LESS THAN (6),
PARTITION p1 VALUES LESS THAN (11),
PARTITION p2 VALUES LESS THAN (16),
PARTITION p3 VALUES LESS THAN MAXVALUE
);
CREATE TABLE employees_2 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE (job_code) (
PARTITION p0 VALUES LESS THAN (100),
PARTITION p1 VALUES LESS THAN (1000),
PARTITION p2 VALUES LESS THAN (10000)
);
CREATE TABLE employees_3 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY RANGE (YEAR(separated)) (
PARTITION p0 VALUES LESS THAN (1991),
PARTITION p1 VALUES LESS THAN (1996),
PARTITION p2 VALUES LESS THAN (2001),
PARTITION p3 VALUES LESS THAN MAXVALUE
);
CREATE TABLE quarterly_report_status (
report_id INT NOT NULL,
report_status VARCHAR(20) NOT NULL,
report_updated TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
) ENGINE=ROCKSDB
PARTITION BY RANGE (UNIX_TIMESTAMP(report_updated)) (
PARTITION p0 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-01-01 00:00:00') ),
PARTITION p1 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-04-01 00:00:00') ),
PARTITION p2 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-07-01 00:00:00') ),
PARTITION p3 VALUES LESS THAN ( UNIX_TIMESTAMP('2008-10-01 00:00:00') ),
PARTITION p4 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-01-01 00:00:00') ),
PARTITION p5 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-04-01 00:00:00') ),
PARTITION p6 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-07-01 00:00:00') ),
PARTITION p7 VALUES LESS THAN ( UNIX_TIMESTAMP('2009-10-01 00:00:00') ),
PARTITION p8 VALUES LESS THAN ( UNIX_TIMESTAMP('2010-01-01 00:00:00') ),
PARTITION p9 VALUES LESS THAN (MAXVALUE)
);
CREATE TABLE employees_4 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY LIST(store_id) (
PARTITION pNorth VALUES IN (3,5,6,9,17),
PARTITION pEast VALUES IN (1,2,10,11,19,20),
PARTITION pWest VALUES IN (4,12,13,14,18),
PARTITION pCentral VALUES IN (7,8,15,16)
);
CREATE TABLE h2 (
c1 INT,
c2 INT
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION p0 VALUES IN (1, 4, 7),
PARTITION p1 VALUES IN (2, 5, 8)
);
# ERROR 1526 (HY000): Table has no partition for value 3
--error 1526
INSERT INTO h2 VALUES (3, 5);
CREATE TABLE rcx (
a INT,
b INT,
c CHAR(3),
d INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,d,c) (
PARTITION p0 VALUES LESS THAN (5,10,'ggg'),
PARTITION p1 VALUES LESS THAN (10,20,'mmm'),
PARTITION p2 VALUES LESS THAN (15,30,'sss'),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
CREATE TABLE r1 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE (a) (
PARTITION p0 VALUES LESS THAN (5),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO r1 VALUES (5,10), (5,11), (5,12);
CREATE TABLE rc1 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a, b) (
PARTITION p0 VALUES LESS THAN (5, 12),
PARTITION p3 VALUES LESS THAN (MAXVALUE, MAXVALUE)
);
INSERT INTO rc1 VALUES (5,10), (5,11), (5,12);
SELECT (5,10) < (5,12), (5,11) < (5,12), (5,12) < (5,12);
CREATE TABLE rx (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS (a) (
PARTITION p0 VALUES LESS THAN (5),
PARTITION p1 VALUES LESS THAN (MAXVALUE)
);
INSERT INTO rx VALUES (5,10), (5,11), (5,12);
CREATE TABLE rc2 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (0,10),
PARTITION p1 VALUES LESS THAN (10,20),
PARTITION p2 VALUES LESS THAN (10,30),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
CREATE TABLE rc3 (
a INT,
b INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b) (
PARTITION p0 VALUES LESS THAN (0,10),
PARTITION p1 VALUES LESS THAN (10,20),
PARTITION p2 VALUES LESS THAN (10,30),
PARTITION p3 VALUES LESS THAN (10,35),
PARTITION p4 VALUES LESS THAN (20,40),
PARTITION p5 VALUES LESS THAN (MAXVALUE,MAXVALUE)
);
CREATE TABLE rc4 (
a INT,
b INT,
c INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b,c) (
PARTITION p0 VALUES LESS THAN (0,25,50),
PARTITION p1 VALUES LESS THAN (10,20,100),
PARTITION p2 VALUES LESS THAN (10,30,50),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
SELECT (0,25,50) < (10,20,100), (10,20,100) < (10,30,50);
-- ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each partition
--error 1493
CREATE TABLE rcf (
a INT,
b INT,
c INT
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(a,b,c) (
PARTITION p0 VALUES LESS THAN (0,25,50),
PARTITION p1 VALUES LESS THAN (20,20,100),
PARTITION p2 VALUES LESS THAN (10,30,50),
PARTITION p3 VALUES LESS THAN (MAXVALUE,MAXVALUE,MAXVALUE)
);
CREATE TABLE employees_by_lname (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT NOT NULL,
store_id INT NOT NULL
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS (lname) (
PARTITION p0 VALUES LESS THAN ('g'),
PARTITION p1 VALUES LESS THAN ('m'),
PARTITION p2 VALUES LESS THAN ('t'),
PARTITION p3 VALUES LESS THAN (MAXVALUE)
);
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (lname) (
PARTITION p0 VALUES LESS THAN ('g'),
PARTITION p1 VALUES LESS THAN ('m'),
PARTITION p2 VALUES LESS THAN ('t'),
PARTITION p3 VALUES LESS THAN (MAXVALUE)
);
ALTER TABLE employees_by_lname PARTITION BY RANGE COLUMNS (hired) (
PARTITION p0 VALUES LESS THAN ('1970-01-01'),
PARTITION p1 VALUES LESS THAN ('1980-01-01'),
PARTITION p2 VALUES LESS THAN ('1990-01-01'),
PARTITION p3 VALUES LESS THAN ('2000-01-01'),
PARTITION p4 VALUES LESS THAN ('2010-01-01'),
PARTITION p5 VALUES LESS THAN (MAXVALUE)
);
CREATE TABLE customers_1 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY LIST COLUMNS(city) (
PARTITION pRegion_1 VALUES IN('Oskarshamn', 'Högsby', 'Mönsterås'),
PARTITION pRegion_2 VALUES IN('Vimmerby', 'Hultsfred', 'Västervik'),
PARTITION pRegion_3 VALUES IN('Nässjö', 'Eksjö', 'Vetlanda'),
PARTITION pRegion_4 VALUES IN('Uppvidinge', 'Alvesta', 'Växjo')
);
CREATE TABLE customers_2 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY LIST COLUMNS(renewal) (
PARTITION pWeek_1 VALUES IN('2010-02-01', '2010-02-02', '2010-02-03',
'2010-02-04', '2010-02-05', '2010-02-06', '2010-02-07'),
PARTITION pWeek_2 VALUES IN('2010-02-08', '2010-02-09', '2010-02-10',
'2010-02-11', '2010-02-12', '2010-02-13', '2010-02-14'),
PARTITION pWeek_3 VALUES IN('2010-02-15', '2010-02-16', '2010-02-17',
'2010-02-18', '2010-02-19', '2010-02-20', '2010-02-21'),
PARTITION pWeek_4 VALUES IN('2010-02-22', '2010-02-23', '2010-02-24',
'2010-02-25', '2010-02-26', '2010-02-27', '2010-02-28')
);
# RANGE COLUMNS partitioning directly on a DATE column: rows are routed by
# comparing the renewal date against strictly ascending upper bounds.
CREATE TABLE customers_3 (
first_name VARCHAR(25),
last_name VARCHAR(25),
street_1 VARCHAR(30),
street_2 VARCHAR(30),
city VARCHAR(15),
renewal DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE COLUMNS(renewal) (
PARTITION pWeek_1 VALUES LESS THAN('2010-02-09'),
PARTITION pWeek_2 VALUES LESS THAN('2010-02-15'),
PARTITION pWeek_3 VALUES LESS THAN('2010-02-22'),
PARTITION pWeek_4 VALUES LESS THAN('2010-03-01')
);
# HASH partitioning on a plain integer column.
CREATE TABLE employees_hash (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY HASH(store_id)
PARTITIONS 4;
# HASH partitioning on an expression (YEAR of a DATE column).
CREATE TABLE employees_hash_1 (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY HASH( YEAR(hired) )
PARTITIONS 4;
CREATE TABLE t1_hash (
col1 INT,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY HASH( YEAR(col3) )
PARTITIONS 4;
# LINEAR HASH: same expression-based hashing, but uses the powers-of-two
# algorithm rather than plain modulo.
CREATE TABLE employees_linear_hash (
id INT NOT NULL,
fname VARCHAR(30),
lname VARCHAR(30),
hired DATE NOT NULL DEFAULT '1970-01-01',
separated DATE NOT NULL DEFAULT '9999-12-31',
job_code INT,
store_id INT
) ENGINE=ROCKSDB
PARTITION BY LINEAR HASH( YEAR(hired) )
PARTITIONS 4;
CREATE TABLE t1_linear_hash (
col1 INT,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR HASH( YEAR(col3) )
PARTITIONS 6;
# KEY() with an empty column list and a PRIMARY KEY present: the PK is used
# as the partitioning key.
CREATE TABLE k1 (
id INT NOT NULL PRIMARY KEY,
name VARCHAR(20)
) ENGINE=ROCKSDB
PARTITION BY KEY()
PARTITIONS 2;
# KEY() with no PK: a NOT NULL unique key serves as the partitioning key.
CREATE TABLE k2 (
id INT NOT NULL,
name VARCHAR(20),
UNIQUE KEY (id)
) ENGINE=ROCKSDB
PARTITION BY KEY()
PARTITIONS 2;
# KEY partitioning on an explicitly named (string) column.
CREATE TABLE tm1 (
s1 CHAR(32) PRIMARY KEY
) ENGINE=ROCKSDB
PARTITION BY KEY(s1)
PARTITIONS 10;
# LINEAR KEY variant of KEY partitioning.
CREATE TABLE tk (
col1 INT NOT NULL,
col2 CHAR(5),
col3 DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR KEY (col1)
PARTITIONS 3;
# RANGE partitioning with HASH subpartitioning. SUBPARTITIONS 2 creates two
# implicitly named subpartitions inside every partition.
CREATE TABLE ts (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) )
SUBPARTITIONS 2 (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (2000),
PARTITION p2 VALUES LESS THAN MAXVALUE
);
# Same layout with explicitly named subpartitions (s0..s5).
CREATE TABLE ts_1 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
# Expected to fail with a syntax error (1064): once subpartitions are named
# explicitly, every partition must define the same number of subpartitions,
# but p1 here defines none.
--error 1064
CREATE TABLE ts_2 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2,
SUBPARTITION s3
)
);
# ts_3 and ts_4 repeat the valid explicit-subpartition layout of ts_1.
CREATE TABLE ts_3 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
CREATE TABLE ts_4 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0,
SUBPARTITION s1
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s2,
SUBPARTITION s3
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s4,
SUBPARTITION s5
)
);
# Subpartition names only need to be unique within the table; here they are
# prefixed per parent partition (s0a/s0b, ...).
CREATE TABLE ts_5 (
id INT,
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE(YEAR(purchased))
SUBPARTITION BY HASH( TO_DAYS(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990) (
SUBPARTITION s0a,
SUBPARTITION s0b
),
PARTITION p1 VALUES LESS THAN (2000) (
SUBPARTITION s1a,
SUBPARTITION s1b
),
PARTITION p2 VALUES LESS THAN MAXVALUE (
SUBPARTITION s2a,
SUBPARTITION s2b
)
);
# Verify that an existing RANGE-partitioned table can be repartitioned
# (ALTER ... PARTITION BY) to a completely different scheme.
CREATE TABLE trb3 (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);
ALTER TABLE trb3 PARTITION BY KEY(id) PARTITIONS 2;
# Verify DROP PARTITION: dropping a partition discards its rows.
CREATE TABLE tr (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(purchased) ) (
PARTITION p0 VALUES LESS THAN (1990),
PARTITION p1 VALUES LESS THAN (1995),
PARTITION p2 VALUES LESS THAN (2000),
PARTITION p3 VALUES LESS THAN (2005)
);
INSERT INTO tr VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'aquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
ALTER TABLE tr DROP PARTITION p2;
# Rows that lived in p2 (purchased 1995-1999) must be gone now.
SELECT * FROM tr WHERE purchased BETWEEN '1995-01-01' AND '1999-12-31';
# ADD PARTITION on a RANGE-partitioned table: a new partition may only be
# appended with a bound above all existing ones.
CREATE TABLE members_3 (
id INT,
fname VARCHAR(25),
lname VARCHAR(25),
dob DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE( YEAR(dob) ) (
PARTITION p0 VALUES LESS THAN (1970),
PARTITION p1 VALUES LESS THAN (1980),
PARTITION p2 VALUES LESS THAN (1990)
);
ALTER TABLE members_3 ADD PARTITION (PARTITION p3 VALUES LESS THAN (2000));
# ERROR 1493 (HY000): VALUES LESS THAN value must be strictly increasing for each partition
--error 1493
ALTER TABLE members_3 ADD PARTITION (PARTITION n VALUES LESS THAN (1960));
# COALESCE PARTITION on a HASH-partitioned table: 12 partitions minus 4
# leaves 8.
CREATE TABLE clients (
id INT,
fname VARCHAR(30),
lname VARCHAR(30),
signed DATE
) ENGINE=ROCKSDB
PARTITION BY HASH( MONTH(signed) )
PARTITIONS 12;
ALTER TABLE clients COALESCE PARTITION 4;
CREATE TABLE clients_lk (
id INT,
fname VARCHAR(30),
lname VARCHAR(30),
signed DATE
) ENGINE=ROCKSDB
PARTITION BY LINEAR KEY(signed)
PARTITIONS 12;
# NOTE(review): clients_lk is created above but never referenced again; the
# statements below operate on `clients` (8 partitions after the coalesce).
# Confirm whether they were meant to target clients_lk instead.
# ERROR 1508 (HY000): Cannot remove all partitions, use DROP TABLE instead
--error 1508
ALTER TABLE clients COALESCE PARTITION 18;
ALTER TABLE clients ADD PARTITION PARTITIONS 6;
# Adding a PRIMARY KEY via ALTER on a populated RANGE-partitioned table.
CREATE TABLE trb1 (
id INT,
name VARCHAR(50),
purchased DATE
) ENGINE=ROCKSDB
PARTITION BY RANGE(id) (
PARTITION p0 VALUES LESS THAN (3),
PARTITION p1 VALUES LESS THAN (7),
PARTITION p2 VALUES LESS THAN (9),
PARTITION p3 VALUES LESS THAN (11)
);
INSERT INTO trb1 VALUES
(1, 'desk organiser', '2003-10-15'),
(2, 'CD player', '1993-11-05'),
(3, 'TV set', '1996-03-10'),
(4, 'bookcase', '1982-01-10'),
(5, 'exercise bike', '2004-05-09'),
(6, 'sofa', '1987-06-05'),
(7, 'popcorn maker', '2001-11-22'),
(8, 'aquarium', '1992-08-04'),
(9, 'study desk', '1984-09-16'),
(10, 'lava lamp', '1998-12-25');
ALTER TABLE trb1 ADD PRIMARY KEY (id);
# Clean up.
# Drops every table created by this test, including ones created earlier in
# the file (before this excerpt). IF EXISTS keeps the cleanup tolerant of
# statements that were expected to fail; note ts_2 is intentionally absent
# because its CREATE was expected to error out (1064).
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS VAR_POP;
DROP TABLE IF EXISTS TEMP0;
DROP TABLE IF EXISTS VAR_SAMP;
DROP TABLE IF EXISTS ti;
DROP TABLE IF EXISTS members;
DROP TABLE IF EXISTS members_2;
DROP TABLE IF EXISTS employees;
DROP TABLE IF EXISTS employees_2;
DROP TABLE IF EXISTS employees_3;
DROP TABLE IF EXISTS quarterly_report_status;
DROP TABLE IF EXISTS employees_4;
DROP TABLE IF EXISTS h2;
DROP TABLE IF EXISTS rcx;
DROP TABLE IF EXISTS r1;
DROP TABLE IF EXISTS rc1;
DROP TABLE IF EXISTS rx;
DROP TABLE IF EXISTS rc2;
DROP TABLE IF EXISTS rc3;
DROP TABLE IF EXISTS rc4;
DROP TABLE IF EXISTS employees_by_lname;
DROP TABLE IF EXISTS customers_1;
DROP TABLE IF EXISTS customers_2;
DROP TABLE IF EXISTS customers_3;
DROP TABLE IF EXISTS employees_hash;
DROP TABLE IF EXISTS employees_hash_1;
DROP TABLE IF EXISTS t1_hash;
DROP TABLE IF EXISTS employees_linear_hash;
DROP TABLE IF EXISTS t1_linear_hash;
DROP TABLE IF EXISTS k1;
DROP TABLE IF EXISTS k2;
DROP TABLE IF EXISTS tm1;
DROP TABLE IF EXISTS tk;
DROP TABLE IF EXISTS ts;
DROP TABLE IF EXISTS ts_1;
DROP TABLE IF EXISTS ts_3;
DROP TABLE IF EXISTS ts_4;
DROP TABLE IF EXISTS ts_5;
DROP TABLE IF EXISTS trb3;
DROP TABLE IF EXISTS tr;
DROP TABLE IF EXISTS members_3;
DROP TABLE IF EXISTS clients;
DROP TABLE IF EXISTS clients_lk;
DROP TABLE IF EXISTS trb1;
......@@ -11,7 +11,7 @@ DROP TABLE IF EXISTS t1;
# restart server with correct parameters
shutdown_server 10;
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name
--sleep 5
--enable_reconnect
--source include/wait_until_connected_again.inc
......@@ -29,7 +29,7 @@ select * from t1 where a = 1;
# restart server to re-read cache
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name
--sleep 5
--enable_reconnect
--source include/wait_until_connected_again.inc
......
......@@ -792,6 +792,20 @@ insert into t47 values (2, 'row2');
set rocksdb_bulk_load=1;
insert into t47 values (3, 'row3'),(4, 'row4');
set rocksdb_bulk_load=0;
# Check concurrent bulk loading
--connect (con1,localhost,root,,)
set rocksdb_bulk_load=1;
insert into t47 values (10, 'row10'),(11, 'row11');
--connection default
set rocksdb_bulk_load=1;
insert into t47 values (100, 'row100'),(101, 'row101');
--disconnect con1
--connection default
set rocksdb_bulk_load=0;
--disable_query_log
let $wait_condition = select count(*) = 8 as c from t47;
--source include/wait_condition.inc
--enable_query_log
select * from t47;
drop table t47;
......
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
--enable_warnings
#
# Create a table with multiple partitions, but in the comment don't specify
# that per-partition based column families (CF) should be created. Expect that
# default CF will be used and new one won't be created.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'testcomment'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
# Expecting no results here.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='testcomment';
DROP TABLE t1;
#
# Same test case as above, only with the reverse CF. Should result in the same
# behavior. No new CF-s created, only default one will be used.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'rev:testrevcomment'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
# Expecting no results here.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:testrevcomment';
DROP TABLE t1;
#
# Create a table with multiple partitions and request for separate CF to be
# created per every partition. As a result we expect three different CF-s to be
# created.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=my_custom_cf;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
set @@global.rocksdb_compact_cf = 'foo';
set @@global.rocksdb_compact_cf = 'my_custom_cf';
set @@global.rocksdb_compact_cf = 'baz';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='foo';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_custom_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='baz';
DROP TABLE t1;
#
# Same test case as above, only one of the partitions has "rev:" prefix. The
# intent here is to make sure that qualifier can specify reverse CF as well.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=t1-p0;custom_p1_cfname=rev:bar;custom_p2_cfname=t1-p2'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
set @@global.rocksdb_compact_cf = 't1-p0';
set @@global.rocksdb_compact_cf = 'rev:bar';
set @@global.rocksdb_compact_cf = 't1-p2';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p0';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='rev:bar';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='t1-p2';
DROP TABLE t1;
#
# Create a table with multiple partitions and assign two partitions to the same
# CF, third one gets a separate partition, and fourth one will belong to a
# default one. As a result we expect two new CF-s to be created.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=cf-zero;custom_p1_cfname=cf-one;custom_p2_cfname=cf-zero'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9),
PARTITION custom_p3 VALUES IN (10, 20, 30)
);
set @@global.rocksdb_compact_cf = 'cf-zero';
set @@global.rocksdb_compact_cf = 'cf-one';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-zero';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='cf-one';
DROP TABLE t1;
#
# Create a table with CF-s per partition and verify that ALTER TABLE + DROP
# INDEX work for that scenario and data is persisted.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
INSERT INTO t1 VALUES (1, 1, "one", null);
INSERT INTO t1 VALUES (2, 2, "two", null);
INSERT INTO t1 VALUES (3, 3, "three", null);
INSERT INTO t1 VALUES (5, 5, "five", null);
INSERT INTO t1 VALUES (9, 9, "nine", null);
SELECT * FROM t1;
ALTER TABLE t1 DROP PRIMARY KEY;
SELECT * FROM t1;
#
# Verify that we can compact custom CF-s.
#
set @@global.rocksdb_compact_cf = 'foo';
set @@global.rocksdb_compact_cf = 'bar';
set @@global.rocksdb_compact_cf = 'baz';
DROP TABLE t1;
#
# Create a table with CF-s per partition and verify that ALTER TABLE + DROP
# INDEX + ADD INDEX work for that scenario, data is persisted, and new column
# families are created.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
INSERT INTO t1 VALUES (1, 1, "one", null);
INSERT INTO t1 VALUES (2, 2, "two", null);
INSERT INTO t1 VALUES (3, 3, "three", null);
INSERT INTO t1 VALUES (5, 5, "five", null);
INSERT INTO t1 VALUES (9, 9, "nine", null);
ALTER TABLE t1 DROP PRIMARY KEY;
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=p0_cf;custom_p1_cfname=p1_cf';
set @@global.rocksdb_compact_cf = 'p0_cf';
set @@global.rocksdb_compact_cf = 'p1_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p0_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p1_cf';
DROP TABLE t1;
#
# Create a table CF-s per partition, use ALTER TABLE to change the way it's
# partitioned and verify that new CF-s will be created.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=bar;custom_p2_cfname=baz'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
INSERT INTO t1 VALUES (1, 1, "one", null);
INSERT INTO t1 VALUES (2, 2, "two", null);
INSERT INTO t1 VALUES (3, 3, "three", null);
INSERT INTO t1 VALUES (5, 5, "five", null);
INSERT INTO t1 VALUES (9, 9, "nine", null);
ALTER TABLE t1 PARTITION BY LIST(c1) (
PARTITION custom_p3 VALUES IN (1, 4, 7),
PARTITION custom_p4 VALUES IN (2, 5, 8, 3, 6, 9)
);
ALTER TABLE t1 DROP PRIMARY KEY;
ALTER TABLE t1 ADD PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p3_cfname=p3_cf;custom_p4_cfname=p4_cf';
set @@global.rocksdb_compact_cf = 'p3_cf';
set @@global.rocksdb_compact_cf = 'p4_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p3_cf';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='p4_cf';
DROP TABLE t1;
#
# Create a table CF-s per partition, use empty qualifier name. Verify that no
# new CF-s are created. This will also make sure that nothing gets added for
# `custom_p2`.
#
CREATE TABLE t1 (
c1 INT,
c2 INT,
name VARCHAR(25) NOT NULL,
event DATE,
PRIMARY KEY (`c1`, `c2`) COMMENT 'custom_p0_cfname=foo;custom_p1_cfname=;'
) ENGINE=ROCKSDB
PARTITION BY LIST(c1) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
DROP TABLE t1;
#
# Verify some basic partition related operations when using PARTITION BY LIST
# COLUMNS on a VARBINARY column on a table with more complicated schema.
#
#
# Verify that creating the table without COMMENT actually works.
#
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
DROP TABLE t2;
#
# Create the same table with two custom CF-s per partition as specified in the
# COMMENT.
#
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=my_cf0;custom_p1_cfname=my_cf1'
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
# Verify that CF-s were created earlier.
set @@global.rocksdb_compact_cf = 'my_cf0';
set @@global.rocksdb_compact_cf = 'my_cf1';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf0';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='my_cf1';
# Insert some random data.
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
# Verify it's there.
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
# Verify it's being fetched from the right partition. This tests partitioning
# functionality, but we want to make sure that by adding CF-s per partition we
# don't regress anything.
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
# Delete the current PK and create a new one referencing different CF-s. We
# need to verify that new CF-s will be created and no data will be lost in
# the process.
ALTER TABLE t2 DROP PRIMARY KEY;
ALTER TABLE t2 ADD PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=new_cf0;custom_p1_cfname=new_cf1';
# Verify that new CF-s are created as well.
set @@global.rocksdb_compact_cf = 'new_cf0';
set @@global.rocksdb_compact_cf = 'new_cf1';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf0';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='new_cf1';
# Insert some more random data.
INSERT INTO t2 VALUES (500, 0x12345, 0x5, 1, 0x2);
INSERT INTO t2 VALUES (700, 0x23456, 0x7, 1, 0x3);
# Verify that partition mappings are still intact.
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x12345;
EXPLAIN PARTITIONS SELECT HEX(col2) FROM t2 where col2 = 0x23456;
# Verify that no data is lost.
SELECT col1, HEX(col2), HEX(col3), col4, HEX(col5) FROM t2;
DROP TABLE t2;
#
# Create the same table with two custom CF-s per partition as specified in the
# COMMENT. Use both the PK and SK when creating the table.
#
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=test_cf5'
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
# Verify that CF-s were created for PK.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf0';
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf1';
# Verify that CF-s were created for SK.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='test_cf5';
# Insert some random data.
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
# Basic verification that correct partition and key are used when searching.
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col2 = 0x6789A AND col4 = 1;
# Remove the key.
ALTER TABLE t2 DROP KEY `col2`;
# Add a new key and expect new CF to be created as well.
ALTER TABLE t2 ADD KEY (`col3`, `col4`) COMMENT 'custom_p5_cfname=another_cf_for_p5';
# Verify that CF-s were created for SK.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='another_cf_for_p5';
# Verify that correct partition and key are used when searching.
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567;
DROP TABLE t2;
#
# Verify the same scenario as before, but with a UNIQUE KEY in addition to PK.
#
CREATE TABLE `t2` (
`col1` bigint(20) NOT NULL,
`col2` varbinary(64) NOT NULL,
`col3` varbinary(256) NOT NULL,
`col4` bigint(20) NOT NULL,
`col5` mediumblob NOT NULL,
PRIMARY KEY (`col1`,`col2`,`col3`) COMMENT 'custom_p0_cfname=test_cf0;custom_p1_cfname=test_cf1',
UNIQUE KEY (`col2`, `col4`) COMMENT 'custom_p5_cfname=unique_test_cf5'
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
PARTITION BY LIST COLUMNS (`col2`) (
PARTITION custom_p0 VALUES IN (0x12345),
PARTITION custom_p1 VALUES IN (0x23456),
PARTITION custom_p2 VALUES IN (0x34567),
PARTITION custom_p3 VALUES IN (0x45678),
PARTITION custom_p4 VALUES IN (0x56789),
PARTITION custom_p5 VALUES IN (0x6789A),
PARTITION custom_p6 VALUES IN (0x789AB),
PARTITION custom_p7 VALUES IN (0x89ABC)
);
# Verify that CF-s were created for SK.
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='unique_test_cf5';
INSERT INTO t2 VALUES (100, 0x12345, 0x1, 1, 0x2);
--error ER_DUP_ENTRY
INSERT INTO t2 VALUES (200, 0x12345, 0x1, 1, 0x2);
--error ER_DUP_ENTRY
INSERT INTO t2 VALUES (300, 0x12345, 0x1, 1, 0x2);
INSERT INTO t2 VALUES (100, 0x23456, 0x2, 1, 0x3);
INSERT INTO t2 VALUES (100, 0x34567, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (400, 0x89ABC, 0x4, 1, 0x5);
INSERT INTO t2 VALUES (500, 0x6789A, 0x5, 1, 0x7);
DROP TABLE t2;
#
# Verify that both partitioned and non-partitioned table can share a CF.
#
# Non-partitioned table whose PK pins the 'sharedcf' column family.
CREATE TABLE t1 (
`a` int,
PRIMARY KEY (a) COMMENT "sharedcf"
) ENGINE=ROCKSDB;
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='sharedcf';
#
# We expect this to succeed.
#
# Partitioned table reusing 'sharedcf' for custom_p0 while also creating a
# brand new CF ('notsharedcf') for custom_p2; custom_p1 gets the default CF.
CREATE TABLE t2 (
`a` INT,
`b` DATE,
`c` VARCHAR(42),
PRIMARY KEY (`a`) COMMENT "custom_p0_cfname=sharedcf;custom_p2_cfname=notsharedcf"
) ENGINE=ROCKSDB
PARTITION BY LIST(`a`) (
PARTITION custom_p0 VALUES IN (1, 4, 7),
PARTITION custom_p1 VALUES IN (2, 5, 8),
PARTITION custom_p2 VALUES IN (3, 6, 9)
);
SELECT DISTINCT(cf_name) FROM information_schema.rocksdb_cfstats WHERE cf_name='notsharedcf';
DROP TABLE IF EXISTS t1;
DROP TABLE IF EXISTS t2;
......@@ -16,6 +16,10 @@ DROP TABLE IF EXISTS t4;
CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT 'cf_t1') ENGINE = ROCKSDB;
CREATE TABLE t2 (j INT, PRIMARY KEY (j) COMMENT 'rev:cf_t2') ENGINE = ROCKSDB;
CREATE TABLE t3 (k INT, PRIMARY KEY (k) COMMENT 'cf_t1') ENGINE = ROCKSDB;
# With partition based column family creation we now expect all the partitions
# to belong to a default column family because mapping wasn't specified in
# this case.
CREATE TABLE t4 (l INT, PRIMARY KEY (l) COMMENT 'cf_t4') ENGINE = ROCKSDB
PARTITION BY KEY(l) PARTITIONS 4;
......
--source include/have_rocksdb.inc
#
# Check whether DATA DIRECTORY and INDEX DIRECTORY
# are supported in CREATE and ALTER TABLE
# Check that when either DATA DIRECTORY or INDEX DIRECTORY are specified
# then MyRocks returns an appropriate error. We don't support this
# functionality and therefore shouldn't just silently accept the values.
#
# Note: the test does not check whether the options
# have any real effect on the table, only
# that they are accepted
# (and apparently ignored)
#
--let $data_dir = $MYSQLTEST_VARDIR/storage_engine_data_dir/
--let $index_dir = $MYSQLTEST_VARDIR/storage_engine_index_dir/
--mkdir $data_dir
--mkdir $index_dir
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '$data_dir' INDEX DIRECTORY = '$index_dir';
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
SHOW CREATE TABLE t1;
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
eval ALTER TABLE t1 INDEX DIRECTORY = '$data_dir';
--replace_result $data_dir <DATA_DIR> $index_dir <INDEX_DIR>
SHOW CREATE TABLE t1;
--error 1296
eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data';
DROP TABLE t1;
--rmdir $data_dir
--rmdir $index_dir
--error 1296
eval CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index';
#
# Verify that we'll get the same error codes when using the partitions.
#
--error 1296
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id)
(
PARTITION P0 VALUES LESS THAN (1000)
DATA DIRECTORY = '/foo/bar/data/',
PARTITION P1 VALUES LESS THAN (2000)
DATA DIRECTORY = '/foo/bar/data/',
PARTITION P2 VALUES LESS THAN (MAXVALUE)
);
--error 1296
CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id)
(
PARTITION P0 VALUES LESS THAN (1000)
INDEX DIRECTORY = '/foo/bar/data/',
PARTITION P1 VALUES LESS THAN (2000)
INDEX DIRECTORY = '/foo/bar/data/',
PARTITION P2 VALUES LESS THAN (MAXVALUE)
);
......@@ -6,10 +6,9 @@ select * from t1 where id=5;
select * from t1 where value=5;
select value from t1 where value=5;
select * from t1 where value2=5;
select * from t1 where id < 3;
select * from t1 where value < 3;
select value from t1 where value < 3;
select * from t1 where value2 < 3;
select * from t1;
select value from t1;
select * from t1 where id < 3 order by id;
select * from t1 where value < 3 order by id;
select value from t1 where value < 3 order by id;
select * from t1 where value2 < 3 order by id;
select * from t1 order by id;
select value from t1 order by id;
......@@ -143,3 +143,29 @@ disconnect con1;
disconnect con2;
disconnect con3;
drop table t1, t2;
# skip_unique checks should skip checks only for tables that don't have
# secondary indexes
connection default;
--disable_warnings
drop table if exists t1,t2,t3;
--enable_warnings
# table with PK only
create table t1 (id int, value int, primary key (id)) engine=rocksdb;
# table with PK and SK
create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
# table with hidden PK
create table t3 (id int, value int) engine=rocksdb;
SET @old_val = @@session.unique_checks;
set @@session.unique_checks = FALSE;
insert into t1 values (1, 1), (1, 2);
--error ER_DUP_ENTRY
insert into t2 values (1, 1, 1), (1, 2, 1);
insert into t3 values (1, 1), (1, 1);
set @@session.unique_checks = @old_val;
# cleanup
drop table t1, t2, t3;
......@@ -5,7 +5,7 @@ SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
create table aaa (id int primary key, i int) engine rocksdb;
SET LOCAL rocksdb_write_sync=off;
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
--exec sleep 30
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(1,1);
......@@ -15,7 +15,7 @@ select variable_value-@a from information_schema.global_status where variable_na
insert aaa(id, i) values(3,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
SET LOCAL rocksdb_write_sync=1;
SET LOCAL rocksdb_flush_log_at_trx_commit=1;
insert aaa(id, i) values(4,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(5,1);
......@@ -24,7 +24,7 @@ insert aaa(id, i) values(6,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
SET GLOBAL rocksdb_background_sync=on;
SET LOCAL rocksdb_write_sync=off;
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
insert aaa(id, i) values(7,1);
let $status_var=rocksdb_wal_synced;
......@@ -35,7 +35,7 @@ truncate table aaa;
# Cleanup
drop table aaa;
SET GLOBAL rocksdb_write_sync=off;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
SET GLOBAL rocksdb_write_disable_wal=false;
SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
SET GLOBAL rocksdb_background_sync=off;
......
......@@ -3,9 +3,11 @@
[mysqld.1]
log_slave_updates
rocksdb_enable_2pc=OFF
rocksdb_wal_recovery_mode=2
[mysqld.2]
relay_log_recovery=1
relay_log_info_repository=TABLE
log_slave_updates
rocksdb_enable_2pc=OFF
rocksdb_wal_recovery_mode=2
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF
--sync_binlog=1000 --relay_log_recovery=1
......@@ -5,6 +5,7 @@ log_slave_updates
gtid_mode=ON
enforce_gtid_consistency=ON
rocksdb_enable_2pc=OFF
rocksdb_wal_recovery_mode=2
[mysqld.2]
sync_relay_log_info=100
......@@ -14,3 +15,4 @@ log_slave_updates
gtid_mode=ON
enforce_gtid_consistency=ON
rocksdb_enable_2pc=OFF
rocksdb_wal_recovery_mode=2
--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_write_sync=ON --rocksdb_write_disable_wal=OFF
--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_flush_log_at_trx_commit=1 --rocksdb_write_disable_wal=OFF
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 1"
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 1;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 0"
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 0;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Trying to set variable @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to on"
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = on;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = DEFAULT;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'aaa'"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
"Trying to set variable @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY to 'bbb'"
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
SET @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_global_value;
SELECT @@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@global.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
SET @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY = @start_session_value;
SELECT @@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY;
@@session.ROCKSDB_BLIND_DELETE_PRIMARY_KEY
0
DROP TABLE valid_values;
DROP TABLE invalid_values;
call mtr.add_suppression(" Column family '[a-z]*' not found.");
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES('abc');
INSERT INTO valid_values VALUES('def');
......
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(100);
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
INSERT INTO invalid_values VALUES('\'-1\'');
INSERT INTO invalid_values VALUES('\'101\'');
INSERT INTO invalid_values VALUES('\'484436\'');
SET @start_global_value = @@global.ROCKSDB_DELAYED_WRITE_RATE;
SELECT @start_global_value;
@start_global_value
16777216
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 100"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 100;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
100
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 1"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 1;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 0"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 0;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = DEFAULT;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@session.ROCKSDB_DELAYED_WRITE_RATE to 444. It should fail because it is not session."
SET @@session.ROCKSDB_DELAYED_WRITE_RATE = 444;
ERROR HY000: Variable 'rocksdb_delayed_write_rate' is a GLOBAL variable and should be set with SET GLOBAL
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'aaa'"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to 'bbb'"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '-1'"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '-1';
Got one of the listed errors
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '101'"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '101';
Got one of the listed errors
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
"Trying to set variable @@global.ROCKSDB_DELAYED_WRITE_RATE to '484436'"
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = '484436';
Got one of the listed errors
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
SET @@global.ROCKSDB_DELAYED_WRITE_RATE = @start_global_value;
SELECT @@global.ROCKSDB_DELAYED_WRITE_RATE;
@@global.ROCKSDB_DELAYED_WRITE_RATE
16777216
DROP TABLE valid_values;
DROP TABLE invalid_values;
SET @start_global_value = @@global.ROCKSDB_DISABLEDATASYNC;
SELECT @start_global_value;
@start_global_value
0
"Trying to set variable @@global.ROCKSDB_DISABLEDATASYNC to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_DISABLEDATASYNC = 444;
ERROR HY000: Variable 'rocksdb_disabledatasync' is a read only variable
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(2);
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
SET @start_global_value = @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
SELECT @start_global_value;
@start_global_value
1
SET @start_session_value = @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
SELECT @start_session_value;
@start_session_value
1
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
2
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 2"
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 2;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
2
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 1"
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 1;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
"Trying to set variable @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 0"
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 0;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = DEFAULT;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT to 'aaa'"
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
SET @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_global_value;
SELECT @@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@global.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
SET @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT = @start_session_value;
SELECT @@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT;
@@session.ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
1
DROP TABLE valid_values;
DROP TABLE invalid_values;
......@@ -21,7 +21,7 @@ Table Op Msg_type Msg_text
test.t1 analyze status OK
SHOW INDEXES FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
t1 0 PRIMARY 1 a A 3 NULL NULL LSMTREE
set session rocksdb_flush_memtable_on_analyze=on;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
......@@ -48,11 +48,11 @@ a b
3 3
SHOW TABLE STATUS LIKE 't1';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 0 0 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
t1 ROCKSDB 10 Fixed # # 69 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SHOW TABLE STATUS LIKE 't1';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 3 8 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
t1 ROCKSDB 10 Fixed # # 24 0 0 0 4 NULL NULL NULL latin1_swedish_ci NULL
DROP TABLE t1;
DROP TABLE IF EXISTS t;
CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo');
SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats;
set global rocksdb_force_flush_memtable_now = true;
INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d');
set global rocksdb_force_compute_memtable_stats=0;
SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';
set global rocksdb_force_compute_memtable_stats=1;
SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';
select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end;
case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end
true
DROP TABLE t;
set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK;
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
SET @start_global_value = @@global.ROCKSDB_MASTER_SKIP_TX_API;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_MASTER_SKIP_TX_API;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 1"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 1;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 0"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 0;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to on"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = on;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 1"
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 1;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
0
"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to 0"
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = 0;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
0
"Trying to set variable @@session.ROCKSDB_MASTER_SKIP_TX_API to on"
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = on;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = DEFAULT;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'aaa'"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
"Trying to set variable @@global.ROCKSDB_MASTER_SKIP_TX_API to 'bbb'"
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = 'bbb';
Got one of the listed errors
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
SET @@global.ROCKSDB_MASTER_SKIP_TX_API = @start_global_value;
SELECT @@global.ROCKSDB_MASTER_SKIP_TX_API;
@@global.ROCKSDB_MASTER_SKIP_TX_API
0
SET @@session.ROCKSDB_MASTER_SKIP_TX_API = @start_session_value;
SELECT @@session.ROCKSDB_MASTER_SKIP_TX_API;
@@session.ROCKSDB_MASTER_SKIP_TX_API
0
DROP TABLE valid_values;
DROP TABLE invalid_values;
......@@ -3,12 +3,12 @@ INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(1024);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE;
SET @start_global_value = @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB;
SELECT @start_global_value;
@start_global_value
0
"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE = 444;
ERROR HY000: Variable 'rocksdb_persistent_cache_size' is a read only variable
"Trying to set variable @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB to 444. It should fail because it is readonly."
SET @@global.ROCKSDB_PERSISTENT_CACHE_SIZE_MB = 444;
ERROR HY000: Variable 'rocksdb_persistent_cache_size_mb' is a read only variable
DROP TABLE valid_values;
DROP TABLE invalid_values;
......@@ -6,7 +6,7 @@ INSERT INTO invalid_values VALUES('\'aaa\'');
SET @start_global_value = @@global.ROCKSDB_WAL_RECOVERY_MODE;
SELECT @start_global_value;
@start_global_value
2
1
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 1"
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 1;
......@@ -17,7 +17,7 @@ SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT;
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
@@global.ROCKSDB_WAL_RECOVERY_MODE
2
1
"Trying to set variable @@global.ROCKSDB_WAL_RECOVERY_MODE to 0"
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 0;
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
......@@ -27,7 +27,7 @@ SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = DEFAULT;
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
@@global.ROCKSDB_WAL_RECOVERY_MODE
2
1
"Trying to set variable @@session.ROCKSDB_WAL_RECOVERY_MODE to 444. It should fail because it is not session."
SET @@session.ROCKSDB_WAL_RECOVERY_MODE = 444;
ERROR HY000: Variable 'rocksdb_wal_recovery_mode' is a GLOBAL variable and should be set with SET GLOBAL
......@@ -37,10 +37,10 @@ SET @@global.ROCKSDB_WAL_RECOVERY_MODE = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
@@global.ROCKSDB_WAL_RECOVERY_MODE
2
1
SET @@global.ROCKSDB_WAL_RECOVERY_MODE = @start_global_value;
SELECT @@global.ROCKSDB_WAL_RECOVERY_MODE;
@@global.ROCKSDB_WAL_RECOVERY_MODE
2
1
DROP TABLE valid_values;
DROP TABLE invalid_values;
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
INSERT INTO valid_values VALUES('off');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
SET @start_global_value = @@global.ROCKSDB_WRITE_SYNC;
SELECT @start_global_value;
@start_global_value
0
SET @start_session_value = @@session.ROCKSDB_WRITE_SYNC;
SELECT @start_session_value;
@start_session_value
0
'# Setting to valid values in global scope#'
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 1"
SET @@global.ROCKSDB_WRITE_SYNC = 1;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 0"
SET @@global.ROCKSDB_WRITE_SYNC = 0;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to on"
SET @@global.ROCKSDB_WRITE_SYNC = on;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
1
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to off"
SET @@global.ROCKSDB_WRITE_SYNC = off;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
"Setting the global scope variable back to default"
SET @@global.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
'# Setting to valid values in session scope#'
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 1"
SET @@session.ROCKSDB_WRITE_SYNC = 1;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to 0"
SET @@session.ROCKSDB_WRITE_SYNC = 0;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to on"
SET @@session.ROCKSDB_WRITE_SYNC = on;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
1
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
"Trying to set variable @@session.ROCKSDB_WRITE_SYNC to off"
SET @@session.ROCKSDB_WRITE_SYNC = off;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
"Setting the session scope variable back to default"
SET @@session.ROCKSDB_WRITE_SYNC = DEFAULT;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
'# Testing with invalid values in global scope #'
"Trying to set variable @@global.ROCKSDB_WRITE_SYNC to 'aaa'"
SET @@global.ROCKSDB_WRITE_SYNC = 'aaa';
Got one of the listed errors
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
SET @@global.ROCKSDB_WRITE_SYNC = @start_global_value;
SELECT @@global.ROCKSDB_WRITE_SYNC;
@@global.ROCKSDB_WRITE_SYNC
0
SET @@session.ROCKSDB_WRITE_SYNC = @start_session_value;
SELECT @@session.ROCKSDB_WRITE_SYNC;
@@session.ROCKSDB_WRITE_SYNC
0
DROP TABLE valid_values;
DROP TABLE invalid_values;
--source include/have_rocksdb.inc
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
--let $sys_var=ROCKSDB_BLIND_DELETE_PRIMARY_KEY
--let $read_only=0
--let $session=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc
DROP TABLE valid_values;
DROP TABLE invalid_values;
call mtr.add_suppression(" Column family '[a-z]*' not found.");
--source include/have_rocksdb.inc
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
......@@ -10,6 +13,7 @@ CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
--let $read_only=0
--let $session=0
--let $sticky=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc
DROP TABLE valid_values;
......
--source include/have_rocksdb.inc
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(100);
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
INSERT INTO invalid_values VALUES('\'-1\'');
INSERT INTO invalid_values VALUES('\'101\'');
INSERT INTO invalid_values VALUES('\'484436\'');
--let $sys_var=ROCKSDB_DELAYED_WRITE_RATE
--let $read_only=0
--let $session=0
--source suite/sys_vars/inc/rocksdb_sys_var.inc
DROP TABLE valid_values;
DROP TABLE invalid_values;
--source include/have_rocksdb.inc
--let $sys_var=ROCKSDB_DISABLEDATASYNC
--let $read_only=1
--let $session=0
--source suite/sys_vars/inc/rocksdb_sys_var.inc
--source include/have_rocksdb.inc
CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(2);
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
--let $sys_var=ROCKSDB_FLUSH_LOG_AT_TRX_COMMIT
--let $read_only=0
--let $session=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc
DROP TABLE valid_values;
DROP TABLE invalid_values;
......@@ -37,8 +37,10 @@ INSERT INTO t1 (b) VALUES (3);
--sorted_result
SELECT * FROM t1;
--replace_column 5 # 6 #
SHOW TABLE STATUS LIKE 't1';
ANALYZE TABLE t1;
--replace_column 5 # 6 #
SHOW TABLE STATUS LIKE 't1';
DROP TABLE t1;
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t;
--enable_warnings
CREATE TABLE t (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t (a,b) VALUES (1,'bar'),(2,'foo');
SET @ORIG_PAUSE_BACKGROUND_WORK = @@rocksdb_force_compute_memtable_stats;
set global rocksdb_force_flush_memtable_now = true;
INSERT INTO t (a,b) VALUES (3,'dead'),(4,'beef'),(5,'a'),(6,'bbb'),(7,'c'),(8,'d');
set global rocksdb_force_compute_memtable_stats=0;
SELECT TABLE_ROWS INTO @ROWS_EXCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';
set global rocksdb_force_compute_memtable_stats=1;
SELECT TABLE_ROWS INTO @ROWS_INCLUDE_MEMTABLE FROM information_schema.TABLES WHERE table_name = 't';
select case when @ROWS_INCLUDE_MEMTABLE-@ROWS_EXCLUDE_MEMTABLE > 0 then 'true' else 'false' end;
DROP TABLE t;
set global rocksdb_force_compute_memtable_stats = @ORIG_PAUSE_BACKGROUND_WORK;
......@@ -4,12 +4,12 @@ CREATE TABLE valid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO valid_values VALUES(1);
INSERT INTO valid_values VALUES(0);
INSERT INTO valid_values VALUES('on');
INSERT INTO valid_values VALUES('off');
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
INSERT INTO invalid_values VALUES('\'bbb\'');
--let $sys_var=ROCKSDB_WRITE_SYNC
--let $sys_var=ROCKSDB_MASTER_SKIP_TX_API
--let $read_only=0
--let $session=1
--source suite/sys_vars/inc/rocksdb_sys_var.inc
......
......@@ -7,7 +7,7 @@ INSERT INTO valid_values VALUES(1024);
CREATE TABLE invalid_values (value varchar(255)) ENGINE=myisam;
INSERT INTO invalid_values VALUES('\'aaa\'');
--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE
--let $sys_var=ROCKSDB_PERSISTENT_CACHE_SIZE_MB
--let $read_only=1
--let $session=0
--source suite/sys_vars/inc/rocksdb_sys_var.inc
......
......@@ -483,9 +483,16 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment,
m_entry_single_deletes += s.m_entry_single_deletes;
m_entry_merges += s.m_entry_merges;
m_entry_others += s.m_entry_others;
if (s.m_distinct_keys_per_prefix.size() > 0) {
for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] += s.m_distinct_keys_per_prefix[i];
}
} else {
for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] +=
s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1);
}
}
} else {
m_rows -= s.m_rows;
m_data_size -= s.m_data_size;
......@@ -495,9 +502,16 @@ void Rdb_index_stats::merge(const Rdb_index_stats &s, const bool &increment,
m_entry_single_deletes -= s.m_entry_single_deletes;
m_entry_merges -= s.m_entry_merges;
m_entry_others -= s.m_entry_others;
if (s.m_distinct_keys_per_prefix.size() > 0) {
for (i = 0; i < s.m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] -= s.m_distinct_keys_per_prefix[i];
}
} else {
for (i = 0; i < m_distinct_keys_per_prefix.size(); i++) {
m_distinct_keys_per_prefix[i] -=
s.m_rows >> (m_distinct_keys_per_prefix.size() - i - 1);
}
}
}
}
......
......@@ -24,6 +24,7 @@
/* MyRocks header files */
#include "./ha_rocksdb.h"
#include "./ha_rocksdb_proto.h"
#include "./rdb_psi.h"
namespace myrocks {
......@@ -31,20 +32,13 @@ namespace myrocks {
bool Rdb_cf_manager::is_cf_name_reverse(const char *const name) {
/* nullptr means the default CF is used.. (TODO: can the default CF be
* reverse?) */
if (name && !strncmp(name, "rev:", 4))
return true;
else
return false;
return (name && !strncmp(name, "rev:", 4));
}
#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key ex_key_cfm;
#endif
void Rdb_cf_manager::init(
Rdb_cf_options *const cf_options,
std::vector<rocksdb::ColumnFamilyHandle *> *const handles) {
mysql_mutex_init(ex_key_cfm, &m_mutex, MY_MUTEX_INIT_FAST);
mysql_mutex_init(rdb_cfm_mutex_key, &m_mutex, MY_MUTEX_INIT_FAST);
DBUG_ASSERT(cf_options != nullptr);
DBUG_ASSERT(handles != nullptr);
......@@ -96,14 +90,20 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
DBUG_ASSERT(rdb != nullptr);
DBUG_ASSERT(is_automatic != nullptr);
rocksdb::ColumnFamilyHandle *cf_handle;
rocksdb::ColumnFamilyHandle *cf_handle = nullptr;
RDB_MUTEX_LOCK_CHECK(m_mutex);
mysql_mutex_lock(&m_mutex);
*is_automatic = false;
if (cf_name == nullptr)
if (cf_name == nullptr || *cf_name == '\0') {
cf_name = DEFAULT_CF_NAME;
}
DBUG_ASSERT(cf_name != nullptr);
std::string per_index_name;
if (!strcmp(cf_name, PER_INDEX_CF_NAME)) {
get_per_index_cf_name(db_table_name, index_name, &per_index_name);
cf_name = per_index_name.c_str();
......@@ -111,15 +111,17 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
}
const auto it = m_cf_name_map.find(cf_name);
if (it != m_cf_name_map.end())
if (it != m_cf_name_map.end()) {
cf_handle = it->second;
else {
} else {
/* Create a Column Family. */
const std::string cf_name_str(cf_name);
rocksdb::ColumnFamilyOptions opts;
m_cf_options->get_cf_options(cf_name_str, &opts);
sql_print_information("RocksDB: creating column family %s",
// NO_LINT_DEBUG
sql_print_information("RocksDB: creating a column family %s",
cf_name_str.c_str());
sql_print_information(" write_buffer_size=%ld", opts.write_buffer_size);
sql_print_information(" target_file_size_base=%" PRIu64,
......@@ -127,6 +129,7 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
const rocksdb::Status s =
rdb->CreateColumnFamily(opts, cf_name_str, &cf_handle);
if (s.ok()) {
m_cf_name_map[cf_handle->GetName()] = cf_handle;
m_cf_id_map[cf_handle->GetID()] = cf_handle;
......@@ -134,7 +137,8 @@ Rdb_cf_manager::get_or_create_cf(rocksdb::DB *const rdb, const char *cf_name,
cf_handle = nullptr;
}
}
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return cf_handle;
}
......@@ -160,13 +164,18 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name,
rocksdb::ColumnFamilyHandle *cf_handle;
*is_automatic = false;
mysql_mutex_lock(&m_mutex);
if (cf_name == nullptr)
RDB_MUTEX_LOCK_CHECK(m_mutex);
if (cf_name == nullptr) {
cf_name = DEFAULT_CF_NAME;
}
std::string per_index_name;
if (!strcmp(cf_name, PER_INDEX_CF_NAME)) {
get_per_index_cf_name(db_table_name, index_name, &per_index_name);
DBUG_ASSERT(!per_index_name.empty());
cf_name = per_index_name.c_str();
*is_automatic = true;
}
......@@ -174,7 +183,12 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name,
const auto it = m_cf_name_map.find(cf_name);
cf_handle = (it != m_cf_name_map.end()) ? it->second : nullptr;
mysql_mutex_unlock(&m_mutex);
if (!cf_handle) {
// NO_LINT_DEBUG
sql_print_warning("Column family '%s' not found.", cf_name);
}
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return cf_handle;
}
......@@ -182,11 +196,11 @@ Rdb_cf_manager::get_cf(const char *cf_name, const std::string &db_table_name,
rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const {
rocksdb::ColumnFamilyHandle *cf_handle = nullptr;
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
const auto it = m_cf_id_map.find(id);
if (it != m_cf_id_map.end())
cf_handle = it->second;
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return cf_handle;
}
......@@ -194,11 +208,12 @@ rocksdb::ColumnFamilyHandle *Rdb_cf_manager::get_cf(const uint32_t &id) const {
std::vector<std::string> Rdb_cf_manager::get_cf_names(void) const {
std::vector<std::string> names;
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
for (auto it : m_cf_name_map) {
names.push_back(it.first);
}
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return names;
}
......@@ -206,11 +221,13 @@ std::vector<rocksdb::ColumnFamilyHandle *>
Rdb_cf_manager::get_all_cf(void) const {
std::vector<rocksdb::ColumnFamilyHandle *> list;
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
for (auto it : m_cf_id_map) {
list.push_back(it.second);
}
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return list;
}
......
......@@ -55,12 +55,13 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg,
rocksdb::ColumnFamilyHandle *cf_handle_arg,
uint16_t index_dict_version_arg, uchar index_type_arg,
uint16_t kv_format_version_arg, bool is_reverse_cf_arg,
bool is_auto_cf_arg, const char *_name,
Rdb_index_stats _stats)
bool is_auto_cf_arg, bool is_per_partition_cf_arg,
const char *_name, Rdb_index_stats _stats)
: m_index_number(indexnr_arg), m_cf_handle(cf_handle_arg),
m_index_dict_version(index_dict_version_arg),
m_index_type(index_type_arg), m_kv_format_version(kv_format_version_arg),
m_is_reverse_cf(is_reverse_cf_arg), m_is_auto_cf(is_auto_cf_arg),
m_is_per_partition_cf(is_per_partition_cf_arg),
m_name(_name), m_stats(_stats), m_pk_part_no(nullptr),
m_pack_info(nullptr), m_keyno(keyno_arg), m_key_parts(0),
m_prefix_extractor(nullptr), m_maxlength(0) // means 'not intialized'
......@@ -73,6 +74,7 @@ Rdb_key_def::Rdb_key_def(uint indexnr_arg, uint keyno_arg,
Rdb_key_def::Rdb_key_def(const Rdb_key_def &k)
: m_index_number(k.m_index_number), m_cf_handle(k.m_cf_handle),
m_is_reverse_cf(k.m_is_reverse_cf), m_is_auto_cf(k.m_is_auto_cf),
m_is_per_partition_cf(k.m_is_per_partition_cf),
m_name(k.m_name), m_stats(k.m_stats), m_pk_part_no(k.m_pk_part_no),
m_pack_info(k.m_pack_info), m_keyno(k.m_keyno),
m_key_parts(k.m_key_parts), m_prefix_extractor(k.m_prefix_extractor),
......@@ -116,9 +118,9 @@ void Rdb_key_def::setup(const TABLE *const tbl,
const bool hidden_pk_exists = table_has_hidden_pk(tbl);
const bool secondary_key = (m_index_type == INDEX_TYPE_SECONDARY);
if (!m_maxlength) {
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
if (m_maxlength != 0) {
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return;
}
......@@ -280,7 +282,7 @@ void Rdb_key_def::setup(const TABLE *const tbl,
*/
m_maxlength = max_len;
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
}
}
......@@ -515,6 +517,50 @@ int Rdb_key_def::successor(uchar *const packed_tuple, const uint &len) {
return changed;
}
/*
  Pack one key column from its Field into mem-comparable form.

  @param field          column to pack (Field::ptr must already point at the
                        record image to read)
  @param pack_info      packing descriptor for this keypart
  @param tuple          write cursor inside packed_tuple
  @param packed_tuple   start of the output buffer (used for space checks)
  @param pack_buffer    scratch buffer used by the pack function
  @param unpack_info    if non-null, unpack ("restore") info is appended here
  @param n_null_fields  if non-null, incremented when a NULL value is packed
  @return the advanced write cursor (one past the packed bytes)
*/
uchar *Rdb_key_def::pack_field(Field *const field,
                               Rdb_field_packing *pack_info, uchar *tuple,
                               uchar *const packed_tuple,
                               uchar *const pack_buffer,
                               Rdb_string_writer *const unpack_info,
                               uint *const n_null_fields) const {
  if (field->real_maybe_null()) {
    /* A nullable column spends one leading byte as a NULL indicator. */
    DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1));
    if (field->is_real_null()) {
      /* NULL value. store '\0' so that it sorts before non-NULL values */
      *tuple++ = 0;
      if (n_null_fields != nullptr) {
        (*n_null_fields)++;
      }
      /* That's it, don't store anything else */
      return tuple;
    }
    /* Not a NULL value. Store '1' */
    *tuple++ = 1;
  }

  /* Emit unpack info only if requested AND this keypart actually uses it. */
  const bool want_unpack_info =
      unpack_info != nullptr && pack_info->uses_unpack_info();

  Rdb_pack_field_context pack_ctx(unpack_info);

  /* The pack function advances 'tuple' by at most m_max_image_len bytes. */
  DBUG_ASSERT(
      is_storage_available(tuple - packed_tuple, pack_info->m_max_image_len));
  pack_info->m_pack_func(pack_info, field, pack_buffer, &tuple, &pack_ctx);

  /* Make "unpack info" to be stored in the value */
  if (want_unpack_info) {
    pack_info->m_make_unpack_info_func(pack_info->m_charset_codec, field,
                                       &pack_ctx);
  }
  return tuple;
}
/**
Get index columns from the record and pack them into mem-comparable form.
......@@ -595,45 +641,21 @@ uint Rdb_key_def::pack_record(const TABLE *const tbl, uchar *const pack_buffer,
Field *const field = m_pack_info[i].get_field_in_table(tbl);
DBUG_ASSERT(field != nullptr);
// Old Field methods expected the record pointer to be at tbl->record[0].
// The quick and easy way to fix this was to pass along the offset
// for the pointer.
const my_ptrdiff_t ptr_diff = record - tbl->record[0];
uint field_offset = field->ptr - tbl->record[0];
uint null_offset = field->null_offset(tbl->record[0]);
bool maybe_null = field->real_maybe_null();
field->move_field(const_cast<uchar*>(record) + field_offset,
maybe_null ? const_cast<uchar*>(record) + null_offset : nullptr,
field->null_bit);
// WARNING! Don't return without restoring field->ptr and field->null_ptr
if (field->real_maybe_null()) {
DBUG_ASSERT(is_storage_available(tuple - packed_tuple, 1));
if (field->is_real_null(ptr_diff)) {
/* NULL value. store '\0' so that it sorts before non-NULL values */
*tuple++ = 0;
/* That's it, don't store anything else */
if (n_null_fields)
(*n_null_fields)++;
continue;
} else {
/* Not a NULL value. Store '1' */
*tuple++ = 1;
}
}
tuple = pack_field(field, &m_pack_info[i], tuple, packed_tuple, pack_buffer,
unpack_info, n_null_fields);
const bool create_unpack_info =
(unpack_info && // we were requested to generate unpack_info
m_pack_info[i].uses_unpack_info()); // and this keypart uses it
Rdb_pack_field_context pack_ctx(unpack_info);
// Set the offset for methods which do not take an offset as an argument
DBUG_ASSERT(is_storage_available(tuple - packed_tuple,
m_pack_info[i].m_max_image_len));
field->move_field_offset(ptr_diff);
m_pack_info[i].m_pack_func(&m_pack_info[i], field, pack_buffer, &tuple,
&pack_ctx);
/* Make "unpack info" to be stored in the value */
if (create_unpack_info) {
m_pack_info[i].m_make_unpack_info_func(m_pack_info[i].m_charset_codec,
field, &pack_ctx);
}
field->move_field_offset(-ptr_diff);
// Restore field->ptr and field->null_ptr
field->move_field(tbl->record[0] + field_offset,
maybe_null ? tbl->record[0] + null_offset : nullptr,
field->null_bit);
}
if (unpack_info) {
......@@ -824,6 +846,35 @@ size_t Rdb_key_def::key_length(const TABLE *const table,
return key.size() - reader.remaining_bytes();
}
/*
  Unpack one key column from mem-comparable form back into its Field.

  @param fpi            packing descriptor for this keypart
  @param field          destination column (written via field->ptr)
  @param reader         reader positioned at this keypart's packed bytes
  @param default_value  default image copied into the field on NULL
  @param unp_reader     reader over the stored unpack info (may be nullptr)
  @return HA_EXIT_SUCCESS on success, HA_EXIT_FAILURE on truncated or
          corrupt input
*/
int Rdb_key_def::unpack_field(Rdb_field_packing *const fpi, Field *const field,
                              Rdb_string_reader *reader,
                              const uchar *const default_value,
                              Rdb_string_reader *unp_reader) const {
  if (fpi->m_maybe_null) {
    /* Nullable columns are prefixed by a one-byte NULL indicator. */
    const char *null_byte = reader->read(1);
    if (null_byte == nullptr) {
      return HA_EXIT_FAILURE; /* key image is truncated */
    }
    switch (*null_byte) {
    case 0:
      /* Set the NULL-bit of this field */
      field->set_null();
      /* Also set the field to its default value */
      memcpy(field->ptr, default_value, field->pack_length());
      return HA_EXIT_SUCCESS;
    case 1:
      field->set_notnull();
      break;
    default:
      return HA_EXIT_FAILURE; /* corrupt NULL indicator */
    }
  }
  return fpi->m_unpack_func(fpi, field, field->ptr, reader, unp_reader);
}
/*
Take mem-comparable form and unpack_info and unpack it to Table->record
......@@ -850,11 +901,6 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf,
// ha_rocksdb::convert_record_from_storage_format instead.
DBUG_ASSERT_IMP(!secondary_key, !verify_row_debug_checksums);
// Old Field methods expected the record pointer to be at tbl->record[0].
// The quick and easy way to fix this was to pass along the offset
// for the pointer.
const my_ptrdiff_t ptr_diff = buf - table->record[0];
// Skip the index number
if ((!reader.read(INDEX_NUMBER_SIZE))) {
return HA_EXIT_FAILURE;
......@@ -891,35 +937,31 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf,
if (fpi->m_unpack_func) {
/* It is possible to unpack this column. Do it. */
if (fpi->m_maybe_null) {
const char *nullp;
if (!(nullp = reader.read(1)))
return HA_EXIT_FAILURE;
if (*nullp == 0) {
/* Set the NULL-bit of this field */
field->set_null(ptr_diff);
/* Also set the field to its default value */
uint field_offset = field->ptr - table->record[0];
memcpy(buf + field_offset, table->s->default_values + field_offset,
field->pack_length());
continue;
} else if (*nullp == 1)
field->set_notnull(ptr_diff);
else
return HA_EXIT_FAILURE;
}
uint null_offset = field->null_offset();
bool maybe_null = field->real_maybe_null();
field->move_field(buf + field_offset,
maybe_null ? buf + null_offset : nullptr,
field->null_bit);
// WARNING! Don't return without restoring field->ptr and field->null_ptr
// If we need unpack info, but there is none, tell the unpack function
// this by passing unp_reader as nullptr. If we never read unpack_info
// during unpacking anyway, then there won't an error.
const bool maybe_missing_unpack =
!has_unpack_info && fpi->uses_unpack_info();
const int res =
fpi->m_unpack_func(fpi, field, field->ptr + ptr_diff, &reader,
int res = unpack_field(fpi, field, &reader,
table->s->default_values + field_offset,
maybe_missing_unpack ? nullptr : &unp_reader);
if (res)
// Restore field->ptr and field->null_ptr
field->move_field(table->record[0] + field_offset,
maybe_null ? table->record[0] + null_offset : nullptr,
field->null_bit);
if (res) {
return res;
}
} else {
/* It is impossible to unpack the column. Skip it. */
if (fpi->m_maybe_null) {
......@@ -2141,7 +2183,7 @@ static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs,
size_t *const mb_len) {
DBUG_ASSERT(cs->number < MY_ALL_CHARSETS_SIZE);
if (!rdb_mem_comparable_space[cs->number].get()) {
mysql_mutex_lock(&rdb_mem_cmp_space_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_mem_cmp_space_mutex);
if (!rdb_mem_comparable_space[cs->number].get()) {
// Upper bound of how many bytes can be occupied by multi-byte form of a
// character in any charset.
......@@ -2167,7 +2209,7 @@ static void rdb_get_mem_comparable_space(const CHARSET_INFO *const cs,
}
rdb_mem_comparable_space[cs->number].reset(info);
}
mysql_mutex_unlock(&rdb_mem_cmp_space_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_mem_cmp_space_mutex);
}
*xfrm = &rdb_mem_comparable_space[cs->number]->spaces_xfrm;
......@@ -2191,7 +2233,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) {
const Rdb_collation_codec *codec = rdb_collation_data[cs->number];
if (codec == nullptr && rdb_is_collation_supported(cs)) {
mysql_mutex_lock(&rdb_collation_data_mutex);
RDB_MUTEX_LOCK_CHECK(rdb_collation_data_mutex);
codec = rdb_collation_data[cs->number];
if (codec == nullptr) {
Rdb_collation_codec *cur = nullptr;
......@@ -2235,7 +2278,8 @@ rdb_init_collation_mapping(const my_core::CHARSET_INFO *const cs) {
rdb_collation_data[cs->number] = cur;
}
}
mysql_mutex_unlock(&rdb_collation_data_mutex);
RDB_MUTEX_UNLOCK_CHECK(rdb_collation_data_mutex);
}
return codec;
......@@ -2597,9 +2641,10 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict,
for (uint i = 0; i < m_key_count; i++) {
const Rdb_key_def &kd = *m_key_descr_arr[i];
const uchar flags =
uchar flags =
(kd.m_is_reverse_cf ? Rdb_key_def::REVERSE_CF_FLAG : 0) |
(kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0);
(kd.m_is_auto_cf ? Rdb_key_def::AUTO_CF_FLAG : 0) |
(kd.m_is_per_partition_cf ? Rdb_key_def::PER_PARTITION_CF_FLAG : 0);
const uint cf_id = kd.get_cf()->GetID();
/*
......@@ -2610,13 +2655,21 @@ bool Rdb_tbl_def::put_dict(Rdb_dict_manager *const dict,
control, we can switch to use it and removing mutex.
*/
uint existing_cf_flags;
const std::string cf_name = kd.get_cf()->GetName();
if (dict->get_cf_flags(cf_id, &existing_cf_flags)) {
// For the purposes of comparison we'll clear the partitioning bit. The
// intent here is to make sure that both partitioned and non-partitioned
// tables can refer to the same CF.
existing_cf_flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE;
flags &= ~Rdb_key_def::CF_FLAGS_TO_IGNORE;
if (existing_cf_flags != flags) {
my_printf_error(ER_UNKNOWN_ERROR,
"Column Family Flag is different from existing flag. "
"Assign a new CF flag, or do not change existing "
"CF flag.",
MYF(0));
"Column family ('%s') flag (%d) is different from an "
"existing flag (%d). Assign a new CF flag, or do not "
"change existing CF flag.", MYF(0), cf_name.c_str(),
flags, existing_cf_flags);
return true;
}
} else {
......@@ -2690,6 +2743,24 @@ void Rdb_ddl_manager::erase_index_num(const GL_INDEX_ID &gl_index_id) {
m_index_num_to_keydef.erase(gl_index_id);
}
/*
  Register key definitions that are not yet committed to the data dictionary
  (e.g. indexes being built by an in-progress ALTER TABLE) so lookups by
  index id can find them. Takes the DDL manager rwlock for writing.
*/
void Rdb_ddl_manager::add_uncommitted_keydefs(
    const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes) {
  mysql_rwlock_wrlock(&m_rwlock);
  for (const auto &keydef : indexes) {
    m_index_num_to_uncommitted_keydef[keydef->get_gl_index_id()] = keydef;
  }
  mysql_rwlock_unlock(&m_rwlock);
}
/*
  Drop previously registered uncommitted key definitions (after the ALTER
  either commits them to the dictionary or rolls back). Takes the DDL
  manager rwlock for writing. Erasing an id that is absent is a no-op.
*/
void Rdb_ddl_manager::remove_uncommitted_keydefs(
    const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes) {
  mysql_rwlock_wrlock(&m_rwlock);
  for (const auto &keydef : indexes) {
    m_index_num_to_uncommitted_keydef.erase(keydef->get_gl_index_id());
  }
  mysql_rwlock_unlock(&m_rwlock);
}
namespace // anonymous namespace = not visible outside this source file
{
struct Rdb_validate_tbls : public Rdb_tables_scanner {
......@@ -3005,7 +3076,8 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg,
tdef->m_key_descr_arr[keyno] = std::make_shared<Rdb_key_def>(
gl_index_id.index_id, keyno, cfh, m_index_dict_version, m_index_type,
kv_version, flags & Rdb_key_def::REVERSE_CF_FLAG,
flags & Rdb_key_def::AUTO_CF_FLAG, "",
flags & Rdb_key_def::AUTO_CF_FLAG,
flags & Rdb_key_def::PER_PARTITION_CF_FLAG, "",
m_dict->get_stats(gl_index_id));
}
put(tdef);
......@@ -3079,6 +3151,14 @@ Rdb_ddl_manager::safe_find(GL_INDEX_ID gl_index_id) {
ret = kd;
}
}
} else {
auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id);
if (it != m_index_num_to_uncommitted_keydef.end()) {
const auto &kd = it->second;
if (kd->max_storage_fmt_length() != 0) {
ret = kd;
}
}
}
mysql_rwlock_unlock(&m_rwlock);
......@@ -3097,6 +3177,11 @@ Rdb_ddl_manager::find(GL_INDEX_ID gl_index_id) {
return table_def->m_key_descr_arr[it->second.second];
}
}
} else {
auto it = m_index_num_to_uncommitted_keydef.find(gl_index_id);
if (it != m_index_num_to_uncommitted_keydef.end()) {
return it->second;
}
}
static std::shared_ptr<Rdb_key_def> empty = nullptr;
......@@ -3126,6 +3211,8 @@ void Rdb_ddl_manager::adjust_stats(
for (const auto &src : data) {
const auto &keydef = find(src.m_gl_index_id);
if (keydef) {
keydef->m_stats.m_distinct_keys_per_prefix.resize(
keydef->get_key_parts());
keydef->m_stats.merge(src, i == 0, keydef->max_storage_fmt_length());
m_stats2store[keydef->m_stats.m_gl_index_id] = keydef->m_stats;
}
......@@ -3671,6 +3758,7 @@ void Rdb_dict_manager::add_cf_flags(rocksdb::WriteBatch *const batch,
void Rdb_dict_manager::delete_index_info(rocksdb::WriteBatch *batch,
const GL_INDEX_ID &gl_index_id) const {
delete_with_prefix(batch, Rdb_key_def::INDEX_INFO, gl_index_id);
delete_with_prefix(batch, Rdb_key_def::INDEX_STATISTICS, gl_index_id);
}
bool Rdb_dict_manager::get_index_info(const GL_INDEX_ID &gl_index_id,
......@@ -4133,7 +4221,7 @@ uint Rdb_seq_generator::get_and_update_next_number(
DBUG_ASSERT(dict != nullptr);
uint res;
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
res = m_next_number++;
......@@ -4144,7 +4232,7 @@ uint Rdb_seq_generator::get_and_update_next_number(
dict->update_max_index_id(batch, res);
dict->commit(batch);
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
return res;
}
......
......@@ -167,6 +167,13 @@ class Rdb_key_def {
uchar *const packed_tuple, const uchar *const key_tuple,
const key_part_map &keypart_map) const;
uchar *pack_field(Field *const field,
Rdb_field_packing *pack_info,
uchar * tuple,
uchar *const packed_tuple,
uchar *const pack_buffer,
Rdb_string_writer *const unpack_info,
uint *const n_null_fields) const;
/* Convert a key from Table->record format to mem-comparable form */
uint pack_record(const TABLE *const tbl, uchar *const pack_buffer,
const uchar *const record, uchar *const packed_tuple,
......@@ -177,6 +184,11 @@ class Rdb_key_def {
/* Pack the hidden primary key into mem-comparable form. */
uint pack_hidden_pk(const longlong &hidden_pk_id,
uchar *const packed_tuple) const;
int unpack_field(Rdb_field_packing *const fpi,
Field *const field,
Rdb_string_reader* reader,
const uchar *const default_value,
Rdb_string_reader* unp_reader) const;
int unpack_record(TABLE *const table, uchar *const buf,
const rocksdb::Slice *const packed_key,
const rocksdb::Slice *const unpack_info,
......@@ -287,7 +299,7 @@ class Rdb_key_def {
rocksdb::ColumnFamilyHandle *cf_handle_arg,
uint16_t index_dict_version_arg, uchar index_type_arg,
uint16_t kv_format_version_arg, bool is_reverse_cf_arg,
bool is_auto_cf_arg, const char *name,
bool is_auto_cf_arg, bool is_per_partition_cf, const char *name,
Rdb_index_stats stats = Rdb_index_stats());
~Rdb_key_def();
......@@ -303,8 +315,13 @@ class Rdb_key_def {
enum {
REVERSE_CF_FLAG = 1,
AUTO_CF_FLAG = 2,
PER_PARTITION_CF_FLAG = 4,
};
// Set of flags to ignore when comparing two CF-s and determining if
// they're same.
static const uint CF_FLAGS_TO_IGNORE = PER_PARTITION_CF_FLAG;
// Data dictionary types
enum DATA_DICT_TYPE {
DDL_ENTRY_INDEX_START_NUMBER = 1,
......@@ -414,6 +431,10 @@ class Rdb_key_def {
bool m_is_reverse_cf;
bool m_is_auto_cf;
/* If true, then column family is created per partition. */
bool m_is_per_partition_cf;
std::string m_name;
mutable Rdb_index_stats m_stats;
......@@ -740,8 +761,13 @@ interface Rdb_tables_scanner {
class Rdb_ddl_manager {
Rdb_dict_manager *m_dict = nullptr;
my_core::HASH m_ddl_hash; // Contains Rdb_tbl_def elements
// maps index id to <table_name, index number>
// Maps index id to <table_name, index number>
std::map<GL_INDEX_ID, std::pair<std::string, uint>> m_index_num_to_keydef;
// Maps index id to key definitons not yet committed to data dictionary.
// This is mainly used to store key definitions during ALTER TABLE.
std::map<GL_INDEX_ID, std::shared_ptr<Rdb_key_def>>
m_index_num_to_uncommitted_keydef;
mysql_rwlock_t m_rwlock;
Rdb_seq_generator m_sequence;
......@@ -787,6 +813,10 @@ class Rdb_ddl_manager {
int scan_for_tables(Rdb_tables_scanner *tables_scanner);
void erase_index_num(const GL_INDEX_ID &gl_index_id);
void add_uncommitted_keydefs(
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes);
void remove_uncommitted_keydefs(
const std::unordered_set<std::shared_ptr<Rdb_key_def>> &indexes);
private:
/* Put the data into in-memory table (only) */
......@@ -867,7 +897,7 @@ class Rdb_binlog_manager {
3. CF id => CF flags
key: Rdb_key_def::CF_DEFINITION(0x3) + cf_id
value: version, {is_reverse_cf, is_auto_cf}
value: version, {is_reverse_cf, is_auto_cf, is_per_partition_cf}
cf_flags is 4 bytes in total.
4. Binlog entry (updated at commit)
......@@ -930,9 +960,9 @@ class Rdb_dict_manager {
inline void cleanup() { mysql_mutex_destroy(&m_mutex); }
inline void lock() { mysql_mutex_lock(&m_mutex); }
inline void lock() { RDB_MUTEX_LOCK_CHECK(m_mutex); }
inline void unlock() { mysql_mutex_unlock(&m_mutex); }
inline void unlock() { RDB_MUTEX_UNLOCK_CHECK(m_mutex); }
/* Raw RocksDB operations */
std::unique_ptr<rocksdb::WriteBatch> begin() const;
......
......@@ -256,7 +256,7 @@ static int rdb_i_s_perf_context_fill_table(
Rdb_perf_counters counters;
if (rdb_normalize_tablename(it, &str)) {
return HA_ERR_INTERNAL_ERROR;
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
if (rdb_split_normalized_tablename(str, &dbname, &tablename, &partname)) {
......@@ -447,8 +447,6 @@ static int rdb_i_s_cfoptions_fill_table(
opts.disable_auto_compactions ? "ON" : "OFF"},
{"PURGE_REDUNDANT_KVS_WHILE_FLUSH",
opts.purge_redundant_kvs_while_flush ? "ON" : "OFF"},
{"VERIFY_CHECKSUM_IN_COMPACTION",
opts.verify_checksums_in_compaction ? "ON" : "OFF"},
{"MAX_SEQUENTIAL_SKIP_IN_ITERATIONS",
std::to_string(opts.max_sequential_skip_in_iterations)},
{"MEMTABLE_FACTORY", opts.memtable_factory == nullptr
......@@ -463,8 +461,6 @@ static int rdb_i_s_cfoptions_fill_table(
std::to_string(opts.memtable_huge_page_size)},
{"BLOOM_LOCALITY", std::to_string(opts.bloom_locality)},
{"MAX_SUCCESSIVE_MERGES", std::to_string(opts.max_successive_merges)},
{"MIN_PARTIAL_MERGE_OPERANDS",
std::to_string(opts.min_partial_merge_operands)},
{"OPTIMIZE_FILTERS_FOR_HITS",
(opts.optimize_filters_for_hits ? "ON" : "OFF")},
};
......@@ -811,7 +807,7 @@ static int rdb_i_s_compact_stats_fill_table(
DBUG_ASSERT(thd != nullptr);
DBUG_ASSERT(tables != nullptr);
DBUG_ENTER("rdb_i_s_global_compact_stats_table");
DBUG_ENTER_FUNC();
int ret = 0;
......@@ -1025,7 +1021,7 @@ static int rdb_i_s_global_info_init(void *const p) {
static int rdb_i_s_compact_stats_init(void *p) {
my_core::ST_SCHEMA_TABLE *schema;
DBUG_ENTER("rdb_i_s_compact_stats_init");
DBUG_ENTER_FUNC();
DBUG_ASSERT(p != nullptr);
schema = reinterpret_cast<my_core::ST_SCHEMA_TABLE *>(p);
......
......@@ -159,7 +159,7 @@ Rdb_mutex::Rdb_mutex() {
Rdb_mutex::~Rdb_mutex() { mysql_mutex_destroy(&m_mutex); }
Status Rdb_mutex::Lock() {
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
DBUG_ASSERT(m_old_stage_info.count(current_thd) == 0);
return Status::OK();
}
......@@ -174,7 +174,7 @@ Status Rdb_mutex::TryLockFor(int64_t timeout_time MY_ATTRIBUTE((__unused__))) {
Note: PThreads API has pthread_mutex_timedlock(), but mysql's
mysql_mutex_* wrappers do not wrap that function.
*/
mysql_mutex_lock(&m_mutex);
RDB_MUTEX_LOCK_CHECK(m_mutex);
return Status::OK();
}
......@@ -202,7 +202,7 @@ void Rdb_mutex::UnLock() {
return;
}
#endif
mysql_mutex_unlock(&m_mutex);
RDB_MUTEX_UNLOCK_CHECK(m_mutex);
}
} // namespace myrocks
/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif
#define MYSQL_SERVER 1
/* The C++ file's header */
#include "./rdb_psi.h"
/* MySQL header files */
#include <mysql/psi/mysql_stage.h>
namespace myrocks {
/*
The following is needed as an argument for mysql_stage_register,
irrespectively of whether we're compiling with P_S or not.
*/
// Defined unconditionally: mysql_stage_register() needs this argument even
// when the server is built without the Performance Schema.
my_core::PSI_stage_info stage_waiting_on_row_lock = {0, "Waiting for row lock",
0};
#ifdef HAVE_PSI_INTERFACE
// Stages registered with the Performance Schema by init_rocksdb_psi_keys().
my_core::PSI_stage_info *all_rocksdb_stages[] = {&stage_waiting_on_row_lock};
// Keys identifying the MyRocks background and drop-index threads to P_S.
my_core::PSI_thread_key rdb_background_psi_thread_key,
rdb_drop_idx_psi_thread_key;
my_core::PSI_thread_info all_rocksdb_threads[] = {
{&rdb_background_psi_thread_key, "background", PSI_FLAG_GLOBAL},
{&rdb_drop_idx_psi_thread_key, "drop index", PSI_FLAG_GLOBAL},
};
// Instrumentation keys for the global mutexes used across MyRocks.
my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key, rdb_signal_bg_psi_mutex_key,
rdb_signal_drop_idx_psi_mutex_key, rdb_collation_data_mutex_key,
rdb_mem_cmp_space_mutex_key, key_mutex_tx_list, rdb_sysvars_psi_mutex_key,
rdb_cfm_mutex_key;
// Mutex key -> display name mapping handed to PSI_server->register_mutex().
my_core::PSI_mutex_info all_rocksdb_mutexes[] = {
{&rdb_psi_open_tbls_mutex_key, "open tables", PSI_FLAG_GLOBAL},
{&rdb_signal_bg_psi_mutex_key, "stop background", PSI_FLAG_GLOBAL},
{&rdb_signal_drop_idx_psi_mutex_key, "signal drop index", PSI_FLAG_GLOBAL},
{&rdb_collation_data_mutex_key, "collation data init", PSI_FLAG_GLOBAL},
{&rdb_mem_cmp_space_mutex_key, "collation space char data init",
PSI_FLAG_GLOBAL},
{&key_mutex_tx_list, "tx_list", PSI_FLAG_GLOBAL},
{&rdb_sysvars_psi_mutex_key, "setting sysvar", PSI_FLAG_GLOBAL},
{&rdb_cfm_mutex_key, "column family manager", PSI_FLAG_GLOBAL},
};
// Instrumentation keys for the global rwlocks used across MyRocks.
my_core::PSI_rwlock_key key_rwlock_collation_exception_list,
key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables;
my_core::PSI_rwlock_info all_rocksdb_rwlocks[] = {
{&key_rwlock_collation_exception_list, "collation_exception_list",
PSI_FLAG_GLOBAL},
{&key_rwlock_read_free_rpl_tables, "read_free_rpl_tables", PSI_FLAG_GLOBAL},
{&key_rwlock_skip_unique_check_tables, "skip_unique_check_tables",
PSI_FLAG_GLOBAL},
};
// Condition-variable keys; registration is currently disabled (see the TODO
// in init_rocksdb_psi_keys()), but the keys stay declared for when it is
// re-enabled.
my_core::PSI_cond_key rdb_signal_bg_psi_cond_key,
rdb_signal_drop_idx_psi_cond_key;
my_core::PSI_cond_info all_rocksdb_conds[] = {
{&rdb_signal_bg_psi_cond_key, "cond signal background", PSI_FLAG_GLOBAL},
{&rdb_signal_drop_idx_psi_cond_key, "cond signal drop index",
PSI_FLAG_GLOBAL},
};
/*
  Register all MyRocks instrumentation points (mutexes, rwlocks, stages and
  threads) with the Performance Schema under the "rocksdb" category.
  No-op when the server runs without a PSI implementation.
*/
void init_rocksdb_psi_keys() {
  const char *const category = "rocksdb";

  if (PSI_server == nullptr) {
    return;
  }

  int count = array_elements(all_rocksdb_mutexes);
  PSI_server->register_mutex(category, all_rocksdb_mutexes, count);

  count = array_elements(all_rocksdb_rwlocks);
  PSI_server->register_rwlock(category, all_rocksdb_rwlocks, count);

  count = array_elements(all_rocksdb_conds);
  //TODO Disabling PFS for conditions due to the bug
  // https://github.com/MySQLOnRocksDB/mysql-5.6/issues/92
  // PSI_server->register_cond(category, all_rocksdb_conds, count);

  count = array_elements(all_rocksdb_stages);
  mysql_stage_register(category, all_rocksdb_stages, count);

  count = array_elements(all_rocksdb_threads);
  mysql_thread_register(category, all_rocksdb_threads, count);
}
#else // HAVE_PSI_INTERFACE
void init_rocksdb_psi_keys() {}
#endif // HAVE_PSI_INTERFACE
} // namespace myrocks
/* Copyright (c) 2017, Percona and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#pragma once
#ifndef _rdb_psi_h_
#define _rdb_psi_h_
/* MySQL header files */
#include <my_global.h>
#include <mysql/psi/psi.h>
/* MyRocks header files */
#include "./rdb_utils.h"
namespace myrocks {
/*
The following is needed as an argument for mysql_stage_register,
irrespectively of whether we're compiling with P_S or not.
*/
extern my_core::PSI_stage_info stage_waiting_on_row_lock;
#ifdef HAVE_PSI_INTERFACE
extern my_core::PSI_thread_key rdb_background_psi_thread_key,
rdb_drop_idx_psi_thread_key;
extern my_core::PSI_mutex_key rdb_psi_open_tbls_mutex_key,
rdb_signal_bg_psi_mutex_key, rdb_signal_drop_idx_psi_mutex_key,
rdb_collation_data_mutex_key, rdb_mem_cmp_space_mutex_key,
key_mutex_tx_list, rdb_sysvars_psi_mutex_key, rdb_cfm_mutex_key;
extern my_core::PSI_rwlock_key key_rwlock_collation_exception_list,
key_rwlock_read_free_rpl_tables, key_rwlock_skip_unique_check_tables;
extern my_core::PSI_cond_key rdb_signal_bg_psi_cond_key,
rdb_signal_drop_idx_psi_cond_key;
#endif // HAVE_PSI_INTERFACE
void init_rocksdb_psi_keys();
} // namespace myrocks
#endif // _rdb_psi_h_
......@@ -191,6 +191,10 @@ Rdb_sst_info::Rdb_sst_info(rocksdb::DB *const db, const std::string &tablename,
m_prefix += normalized_table + "_" + indexname + "_";
}
// Unique filename generated to prevent collisions when the same table
// is loaded in parallel
m_prefix += std::to_string(m_prefix_counter.fetch_add(1)) + "_";
rocksdb::ColumnFamilyDescriptor cf_descr;
const rocksdb::Status s = m_cf->GetDescriptor(&cf_descr);
if (!s.ok()) {
......@@ -221,7 +225,7 @@ int Rdb_sst_info::open_new_sst_file() {
// Open the sst file
const rocksdb::Status s = m_sst_file->open();
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(m_sst_file->get_name(), s.ToString());
delete m_sst_file;
m_sst_file = nullptr;
return HA_EXIT_FAILURE;
......@@ -255,7 +259,7 @@ void Rdb_sst_info::close_curr_sst_file() {
#else
const rocksdb::Status s = m_sst_file->commit();
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(m_sst_file->get_name(), s.ToString());
}
delete m_sst_file;
......@@ -293,7 +297,7 @@ int Rdb_sst_info::put(const rocksdb::Slice &key, const rocksdb::Slice &value) {
// Add the key/value to the current sst file
const rocksdb::Status s = m_sst_file->put(key, value);
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(m_sst_file->get_name(), s.ToString());
return HA_EXIT_FAILURE;
}
......@@ -329,16 +333,18 @@ int Rdb_sst_info::commit() {
return HA_EXIT_SUCCESS;
}
void Rdb_sst_info::set_error_msg(const std::string &msg) {
void Rdb_sst_info::set_error_msg(const std::string &sst_file_name,
const std::string &msg) {
#if defined(RDB_SST_INFO_USE_THREAD)
// Both the foreground and background threads can set the error message
// so lock the mutex to protect it. We only want the first error that
// we encounter.
const std::lock_guard<std::mutex> guard(m_mutex);
#endif
my_printf_error(ER_UNKNOWN_ERROR, "bulk load error: %s", MYF(0), msg.c_str());
my_printf_error(ER_UNKNOWN_ERROR, "[%s] bulk load error: %s", MYF(0),
sst_file_name.c_str(), msg.c_str());
if (m_error_msg.empty()) {
m_error_msg = msg;
m_error_msg = "[" + sst_file_name + "] " + msg;
}
}
......@@ -366,7 +372,7 @@ void Rdb_sst_info::run_thread() {
// Close out the sst file and add it to the database
const rocksdb::Status s = sst_file->commit();
if (!s.ok()) {
set_error_msg(s.ToString());
set_error_msg(sst_file->get_name(), s.ToString());
}
delete sst_file;
......@@ -412,5 +418,6 @@ void Rdb_sst_info::init(const rocksdb::DB *const db) {
my_dirend(dir_info);
}
std::atomic<uint64_t> Rdb_sst_info::m_prefix_counter(0);
std::string Rdb_sst_info::m_suffix = ".bulk_load.tmp";
} // namespace myrocks
......@@ -17,6 +17,7 @@
#pragma once
/* C++ standard header files */
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
......@@ -55,6 +56,7 @@ class Rdb_sst_file {
rocksdb::Status open();
rocksdb::Status put(const rocksdb::Slice &key, const rocksdb::Slice &value);
rocksdb::Status commit();
const std::string get_name() const { return m_name; }
};
class Rdb_sst_info {
......@@ -70,6 +72,7 @@ class Rdb_sst_info {
uint m_sst_count;
std::string m_error_msg;
std::string m_prefix;
static std::atomic<uint64_t> m_prefix_counter;
static std::string m_suffix;
#if defined(RDB_SST_INFO_USE_THREAD)
std::queue<Rdb_sst_file *> m_queue;
......@@ -83,7 +86,7 @@ class Rdb_sst_info {
int open_new_sst_file();
void close_curr_sst_file();
void set_error_msg(const std::string &msg);
void set_error_msg(const std::string &sst_file_name, const std::string &msg);
#if defined(RDB_SST_INFO_USE_THREAD)
void run_thread();
......
......@@ -28,6 +28,7 @@ void *Rdb_thread::thread_func(void *const thread_ptr) {
DBUG_ASSERT(thread_ptr != nullptr);
Rdb_thread *const thread = static_cast<Rdb_thread *const>(thread_ptr);
if (!thread->m_run_once.exchange(true)) {
thread->setname();
thread->run();
thread->uninit();
}
......@@ -56,32 +57,24 @@ int Rdb_thread::create_thread(const std::string &thread_name
PSI_thread_key background_psi_thread_key
#endif
) {
DBUG_ASSERT(!thread_name.empty());
// Make a copy of the name so we can return without worrying that the
// caller will free the memory
m_name = thread_name;
int err = mysql_thread_create(background_psi_thread_key, &m_handle, nullptr,
return mysql_thread_create(background_psi_thread_key, &m_handle, nullptr,
thread_func, this);
if (!err) {
/*
mysql_thread_create() ends up doing some work underneath and setting the
thread name as "my-func". This isn't what we want. Our intent is to name
the threads according to their purpose so that when displayed under the
debugger then they'll be more easily identifiable. Therefore we'll reset
the name if thread was successfully created.
*/
err = pthread_setname_np(m_handle, thread_name.c_str());
}
return err;
}
/*
  Wake the thread up. If stop_thread is true, also mark the thread as
  stopping (m_stop) so its run loop can exit.
  Fix: the scraped text contained both the pre-patch mysql_mutex_lock/unlock
  calls and the post-patch RDB_MUTEX_*_CHECK macros, which would acquire the
  mutex twice; only the checked variants belong here.
*/
void Rdb_thread::signal(const bool &stop_thread) {
  RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
  if (stop_thread) {
    m_stop = true;
  }
  // Wake up anyone waiting on the condition while still holding the mutex.
  mysql_cond_signal(&m_signal_cond);
  RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
}
} // namespace myrocks
......@@ -40,6 +40,8 @@ class Rdb_thread {
pthread_t m_handle;
std::string m_name;
protected:
mysql_mutex_t m_signal_mutex;
mysql_cond_t m_signal_cond;
......@@ -64,6 +66,31 @@ class Rdb_thread {
int join() { return pthread_join(m_handle, nullptr); }
void setname() {
  /*
    mysql_thread_create() ends up doing some work underneath and setting the
    thread name as "my-func". This isn't what we want. Our intent is to name
    the threads according to their purpose so that when displayed under the
    debugger then they'll be more easily identifiable. Therefore we'll reset
    the name if thread was successfully created.
  */
  /*
    We originally had the creator also set the thread name, but that seems to
    not work correctly in all situations. Having the created thread do the
    pthread_setname_np resolves the issue.
  */
  DBUG_ASSERT(!m_name.empty());
  const int err = pthread_setname_np(m_handle, m_name.c_str());
  if (err) {
    /*
      Bug fix: pthread_setname_np() returns the error number directly and
      does NOT set errno, so log 'err' instead of the unrelated errno value.
      (On Linux this fails with ERANGE for names longer than 15 characters.)
    */
    // NO_LINT_DEBUG
    sql_print_warning(
        "MyRocks: Failed to set name (%s) for current thread, errno=%d",
        m_name.c_str(), err);
  }
}
void uninit();
virtual ~Rdb_thread() {}
......@@ -92,9 +119,11 @@ class Rdb_background_thread : public Rdb_thread {
virtual void run() override;
/*
  Ask the background thread to persist statistics on its next iteration by
  setting m_save_stats under the signal mutex.
  Fix: the scraped text contained both the pre-patch mysql_mutex_lock/unlock
  calls and the post-patch RDB_MUTEX_*_CHECK macros, which would acquire the
  mutex twice; only the checked variants belong here.
*/
void request_save_stats() {
  RDB_MUTEX_LOCK_CHECK(m_signal_mutex);
  m_save_stats = true;
  RDB_MUTEX_UNLOCK_CHECK(m_signal_mutex);
}
};
......
......@@ -20,6 +20,7 @@
/* C++ standard header files */
#include <array>
#include <string>
#include <vector>
/* C standard header files */
#include <ctype.h>
......@@ -212,6 +213,22 @@ const char *rdb_skip_id(const struct charset_info_st *const cs,
return rdb_parse_id(cs, str, nullptr);
}
/*
  Splits the given string on the supplied delimiter character and returns
  the tokens in their original order. Empty tokens between consecutive
  delimiters are preserved; an empty input yields an empty vector.
*/
const std::vector<std::string> parse_into_tokens(
    const std::string& s, const char delim) {
  std::vector<std::string> result;
  std::stringstream stream(s);
  for (std::string piece; std::getline(stream, piece, delim);) {
    result.push_back(piece);
  }
  return result;
}
static const std::size_t rdb_hex_bytes_per_char = 2;
static const std::array<char, 16> rdb_hexdigit = {{'0', '1', '2', '3', '4', '5',
'6', '7', '8', '9', 'a', 'b',
......
......@@ -18,8 +18,11 @@
/* C++ standard header files */
#include <chrono>
#include <string>
#include <vector>
/* MySQL header files */
#include "../sql/log.h"
#include "./my_stacktrace.h"
#include "./sql_string.h"
/* RocksDB header files */
......@@ -129,6 +132,16 @@ namespace myrocks {
#define HA_EXIT_SUCCESS FALSE
#define HA_EXIT_FAILURE TRUE
/*
Macros to better convey the intent behind checking the results from locking
and unlocking mutexes.
*/
#define RDB_MUTEX_LOCK_CHECK(m) \
rdb_check_mutex_call_result(__PRETTY_FUNCTION__, true, mysql_mutex_lock(&m))
#define RDB_MUTEX_UNLOCK_CHECK(m) \
rdb_check_mutex_call_result(__PRETTY_FUNCTION__, false, \
mysql_mutex_unlock(&m))
/*
Generic constant.
*/
......@@ -203,6 +216,28 @@ inline int purge_all_jemalloc_arenas() {
#endif
}
/*
Helper function to check the result of locking or unlocking a mutex. We'll
intentionally abort in case of a failure because it's better to terminate
the process instead of continuing in an undefined state and corrupting data
as a result.
*/
inline void rdb_check_mutex_call_result(const char *function_name,
const bool attempt_lock,
const int result) {
if (unlikely(result)) {
/* NO_LINT_DEBUG */
sql_print_error("%s a mutex inside %s failed with an "
"error code %d.",
attempt_lock ? "Locking" : "Unlocking", function_name,
result);
// This will hopefully result in a meaningful stack trace which we can use
// to efficiently debug the root cause.
abort_with_stack_traces();
}
}
/*
Helper functions to parse strings.
*/
......@@ -230,6 +265,9 @@ const char *rdb_parse_id(const struct charset_info_st *const cs,
const char *rdb_skip_id(const struct charset_info_st *const cs, const char *str)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
const std::vector<std::string> parse_into_tokens(const std::string& s,
const char delim);
/*
Helper functions to populate strings.
*/
......
......@@ -3,13 +3,13 @@
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#include "rocksdb/ldb_tool.h"
#include "../rdb_comparator.h"
#include "rocksdb/ldb_tool.h"
int main(int argc, char** argv) {
int main(int argc, char **argv) {
rocksdb::Options db_options;
const myrocks::Rdb_pk_comparator pk_comparator;
db_options.comparator= &pk_comparator;
db_options.comparator = &pk_comparator;
rocksdb::LDBTool tool;
tool.Run(argc, argv, db_options);
......
......@@ -18,30 +18,24 @@
#include "../ha_rocksdb.h"
#include "../rdb_datadic.h"
/*
  Test helper: feeds 'num' copies of a fixed user key into the property
  collector, as deletes when is_delete is true and puts otherwise, then
  asserts that the collector's max-deleted-rows counter matches
  expected_deleted.
  Fix: the scraped text interleaved the pre- and post-reformat versions of
  the signature, loop header and AddUserKey call; this is the single
  coherent (post-patch, clang-formatted) version.
*/
void putKeys(myrocks::Rdb_tbl_prop_coll *coll, int num, bool is_delete,
             uint64_t expected_deleted) {
  std::string str("aaaaaaaaaaaaaa");
  rocksdb::Slice sl(str.data(), str.size());

  for (int i = 0; i < num; i++) {
    coll->AddUserKey(
        sl, sl, is_delete ? rocksdb::kEntryDelete : rocksdb::kEntryPut, 0, 100);
  }
  DBUG_ASSERT(coll->GetMaxDeletedRows() == expected_deleted);
}
int main(int argc, char** argv)
{
int main(int argc, char **argv) {
// test the circular buffer for delete flags
myrocks::Rdb_compact_params params;
params.m_file_size= 333;
params.m_deletes= 333; // irrelevant
params.m_window= 10;
params.m_file_size = 333;
params.m_deletes = 333; // irrelevant
params.m_window = 10;
myrocks::Rdb_tbl_prop_coll coll(nullptr, params, 0,
RDB_DEFAULT_TBL_STATS_SAMPLE_PCT);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment