Commit 65d01da2 authored by Sergei Petrunia's avatar Sergei Petrunia

Copy of

commit ba00e640f658ad8d0a4dff09a497a51b8a4de935
Author: Herman Lee <herman@fb.com>
Date:   Wed Feb 22 06:30:06 2017 -0800

    Improve add_index_alter_cardinality test

    Summary:
    Split add_index_inplace_cardinality test out and add a debug_sync point
    to it so that the flush of the memtable occurs while the alter is
    running.
    Closes https://github.com/facebook/mysql-5.6/pull/539

    Reviewed By: alxyang

    Differential Revision: D4597887

    Pulled By: hermanlee

    fbshipit-source-id: faedda2
parent 7468ccfa
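The summary above hinges on a debug_sync rendezvous: the ALTER connection parks at the `rocksdb.commit_in_place_alter_table` sync point until another connection has forced the memtable flush and issued `SIGNAL flushed` (the full test appears further down in this diff). The following standalone C++ sketch is an illustration only, not MyRocks or MySQL server code; it mimics that WAIT_FOR/SIGNAL handshake with a plain condition variable.

```cpp
// Standalone illustration (not MyRocks code) of the rendezvous that the
// debug_sync point in this test provides: the ALTER thread parks at a named
// sync point ("WAIT_FOR flushed") and only continues after another connection
// has flushed the memtable and issued "SIGNAL flushed".
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool flushed = false;

void alter_thread() {
  std::cout << "ALTER: reached sync point, waiting for 'flushed'\n";
  std::unique_lock<std::mutex> lk(m);
  cv.wait(lk, [] { return flushed; });   // WAIT_FOR flushed
  std::cout << "ALTER: resumed, committing in-place index add\n";
}

void test_thread() {
  std::cout << "TEST: forcing memtable flush while ALTER is parked\n";
  {
    std::lock_guard<std::mutex> lk(m);
    flushed = true;                      // SIGNAL flushed
  }
  cv.notify_one();
}

int main() {
  std::thread alter(alter_thread);
  std::thread test(test_thread);
  alter.join();
  test.join();
  return 0;
}
```

In the actual test the two roles are played by the `ALTER TABLE ... ALGORITHM=INPLACE` statement and the connection that runs `SET GLOBAL rocksdb_force_flush_memtable_now = 1` before signalling.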
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: false
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
    Priority: 2
  - Regex: '^(<|"(gtest|isl|json)/)'
    Priority: 3
  - Regex: '.*'
    Priority: 1
IncludeIsMainRegex: '$'
IndentCaseLabels: false
IndentWidth: 2
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
JavaScriptQuotes: Leave
...
@@ -28,6 +28,11 @@ INCLUDE_DIRECTORIES(
ADD_DEFINITIONS(-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX
-DZLIB)
CHECK_FUNCTION_EXISTS(fallocate HAVE_FALLOCATE)
IF(HAVE_FALLOCATE)
ADD_DEFINITIONS(-DROCKSDB_FALLOCATE_PRESENT)
ENDIF()
SET(ROCKSDB_SOURCES
ha_rocksdb.cc ha_rocksdb.h ha_rocksdb_proto.h
logger.h
@@ -41,6 +46,7 @@ SET(ROCKSDB_SOURCES
rdb_index_merge.cc rdb_index_merge.h
rdb_perf_context.cc rdb_perf_context.h
rdb_mutex_wrapper.cc rdb_mutex_wrapper.h
rdb_psi.h rdb_psi.cc
rdb_sst_info.cc rdb_sst_info.h
rdb_utils.cc rdb_utils.h rdb_buff.h
rdb_threads.cc rdb_threads.h
@@ -121,6 +121,34 @@ const char *const BG_THREAD_NAME = "myrocks-bg";
*/
const char *const INDEX_THREAD_NAME = "myrocks-index";
/*
Separator between partition name and the qualifier. Sample usage:
- p0_cfname=foo
- p3_tts_col=bar
*/
const char RDB_PER_PARTITION_QUALIFIER_NAME_SEP = '_';
/*
Separator between qualifier name and value. Sample usage:
- p0_cfname=foo
- p3_tts_col=bar
*/
const char RDB_PER_PARTITION_QUALIFIER_VALUE_SEP = '=';
/*
Separator between multiple qualifier assignments. Sample usage:
- p0_cfname=foo;p1_cfname=bar;p2_cfname=baz
*/
const char RDB_QUALIFIER_SEP = ';';
/*
Qualifier name for a custom per partition column family.
*/
const char *const RDB_CF_NAME_QUALIFIER = "cfname";
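The separator and qualifier constants above define a tiny key/value grammar for table comments such as `p0_cfname=foo;p1_cfname=bar;p2_cfname=baz`. As a hedged illustration only (the `split` and `cf_for_partition` helpers below are hypothetical and are not the MyRocks functions declared later in this diff), the lookup could work roughly like this:

```cpp
// Illustrative sketch only (not the MyRocks implementation): how the three
// separators above combine when looking up a per-partition column family
// name from a comment such as "p0_cfname=foo;p1_cfname=bar;p2_cfname=baz".
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static const char NAME_SEP = '_';   // RDB_PER_PARTITION_QUALIFIER_NAME_SEP
static const char VALUE_SEP = '=';  // RDB_PER_PARTITION_QUALIFIER_VALUE_SEP
static const char QUAL_SEP = ';';   // RDB_QUALIFIER_SEP

// Split a string on a single-character delimiter.
std::vector<std::string> split(const std::string &s, char delim) {
  std::vector<std::string> out;
  std::stringstream ss(s);
  std::string tok;
  while (std::getline(ss, tok, delim)) out.push_back(tok);
  return out;
}

// Find the value assigned to "<partition>_cfname" in a qualifier list.
std::string cf_for_partition(const std::string &comment,
                             const std::string &partition) {
  const std::string key = partition + NAME_SEP + "cfname";  // e.g. "p1_cfname"
  for (const std::string &assignment : split(comment, QUAL_SEP)) {
    const std::vector<std::string> kv = split(assignment, VALUE_SEP);
    if (kv.size() == 2 && kv[0] == key) return kv[1];
  }
  return "";  // no per-partition column family specified
}

int main() {
  std::cout << cf_for_partition("p0_cfname=foo;p1_cfname=bar;p2_cfname=baz",
                                "p1")
            << "\n";  // prints "bar"
}
```

With partition "p1" the lookup yields "bar", matching the sample usage in the comments above.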
/*
Default, minimal valid, and maximum valid sampling rate values when collecting
statistics about a table.
@@ -192,7 +220,9 @@ const char *const INDEX_THREAD_NAME = "myrocks-index";
#define HA_ERR_ROCKSDB_UNIQUE_NOT_SUPPORTED (HA_ERR_LAST + 1)
#define HA_ERR_ROCKSDB_PK_REQUIRED (HA_ERR_LAST + 2)
#define HA_ERR_ROCKSDB_TOO_MANY_LOCKS (HA_ERR_LAST + 3)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TOO_MANY_LOCKS
#define HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 4)
#define HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED (HA_ERR_LAST + 5)
#define HA_ERR_ROCKSDB_LAST HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED
inline bool looks_like_per_index_cf_typo(const char *const name) {
return (name && name[0] == '$' && strcmp(name, PER_INDEX_CF_NAME));
@@ -258,6 +288,7 @@ enum operation_type {
ROWS_INSERTED,
ROWS_READ,
ROWS_UPDATED,
ROWS_DELETED_BLIND,
ROWS_MAX
};
@@ -282,6 +313,7 @@ struct st_export_stats {
ulonglong rows_inserted;
ulonglong rows_read;
ulonglong rows_updated;
ulonglong rows_deleted_blind;
ulonglong system_rows_deleted;
ulonglong system_rows_inserted;
@@ -474,13 +506,13 @@ class ha_rocksdb : public my_core::handler {
const rocksdb::Slice &key, std::string *const value) const;
int get_row_by_rowid(uchar *const buf, const char *const rowid,
const uint rowid_size)
const uint rowid_size, const bool skip_lookup = false)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int get_row_by_rowid(uchar *const buf, const uchar *const rowid,
const uint rowid_size)
const uint rowid_size, const bool skip_lookup = false)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__)) {
return get_row_by_rowid(buf, reinterpret_cast<const char *>(rowid),
rowid_size);
rowid_size, skip_lookup);
}
void update_auto_incr_val();
@@ -491,6 +523,7 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
bool can_use_single_delete(const uint &index) const
MY_ATTRIBUTE((__warn_unused_result__));
bool is_blind_delete_enabled();
bool skip_unique_check() const MY_ATTRIBUTE((__warn_unused_result__));
void set_force_skip_unique_check(bool skip) override;
bool commit_in_the_middle() MY_ATTRIBUTE((__warn_unused_result__));
@@ -667,6 +700,21 @@ class ha_rocksdb : public my_core::handler {
int rename_table(const char *const from, const char *const to) override
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_blob_from_storage_format(my_core::Field_blob *const blob,
Rdb_string_reader *const reader,
bool decode)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_varchar_from_storage_format(
my_core::Field_varstring *const field_var,
Rdb_string_reader *const reader, bool decode)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_field_from_storage_format(my_core::Field *const field,
Rdb_string_reader *const reader,
bool decode, uint len)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int convert_record_from_storage_format(const rocksdb::Slice *const key,
const rocksdb::Slice *const value,
uchar *const buf)
@@ -681,6 +729,17 @@ class ha_rocksdb : public my_core::handler {
rocksdb::Slice *const packed_rec)
MY_ATTRIBUTE((__nonnull__));
static const std::string gen_cf_name_qualifier_for_partition(
const std::string &s);
static const std::vector<std::string> parse_into_tokens(const std::string &s,
const char delim);
static const std::string generate_cf_name(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg,
bool *per_part_match_found);
static const char *get_key_name(const uint index,
const TABLE *const table_arg,
const Rdb_tbl_def *const tbl_def_arg)
@@ -702,7 +761,6 @@ class ha_rocksdb : public my_core::handler {
static bool is_pk(const uint index, const TABLE *table_arg,
const Rdb_tbl_def *tbl_def_arg)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
/** @brief
unireg.cc will call max_supported_record_length(), max_supported_keys(),
max_supported_key_parts(), uint max_supported_key_length()
@@ -827,6 +885,7 @@ class ha_rocksdb : public my_core::handler {
rocksdb::ColumnFamilyHandle *cf_handle;
bool is_reverse_cf;
bool is_auto_cf;
bool is_per_partition_cf;
};
struct update_row_info {
@@ -946,10 +1005,8 @@ class ha_rocksdb : public my_core::handler {
int read_before_key(const Rdb_key_def &kd, const bool &using_full_key,
const rocksdb::Slice &key_slice)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int read_after_key(const Rdb_key_def &kd, const bool &using_full_key,
const rocksdb::Slice &key_slice)
int read_after_key(const Rdb_key_def &kd, const rocksdb::Slice &key_slice)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int position_to_correct_key(
const Rdb_key_def &kd, const enum ha_rkey_function &find_flag,
const bool &full_key_match, const uchar *const key,
@@ -23,6 +23,10 @@ namespace myrocks {
class Rdb_logger : public rocksdb::Logger {
public:
explicit Rdb_logger(const rocksdb::InfoLogLevel log_level =
rocksdb::InfoLogLevel::ERROR_LEVEL)
: m_mysql_log_level(log_level) {}
void Logv(const rocksdb::InfoLogLevel log_level, const char *format,
va_list ap) override {
DBUG_ASSERT(format != nullptr);
@@ -33,7 +37,7 @@ class Rdb_logger : public rocksdb::Logger {
m_logger->Logv(log_level, format, ap);
}
if (log_level < GetInfoLogLevel()) {
if (log_level < m_mysql_log_level) {
return;
}
@@ -61,8 +65,21 @@ class Rdb_logger : public rocksdb::Logger {
m_logger = logger;
}
void SetInfoLogLevel(const rocksdb::InfoLogLevel log_level) override {
// The InfoLogLevel for the logger is used by rocksdb to filter
// messages, so it needs to be the lower of the two loggers' levels.
rocksdb::InfoLogLevel base_level = log_level;
if (m_logger && m_logger->GetInfoLogLevel() < base_level) {
base_level = m_logger->GetInfoLogLevel();
}
rocksdb::Logger::SetInfoLogLevel(base_level);
m_mysql_log_level = log_level;
}
private:
std::shared_ptr<rocksdb::Logger> m_logger;
rocksdb::InfoLogLevel m_mysql_log_level;
};
} // namespace myrocks
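A minimal sketch of the level arithmetic in SetInfoLogLevel above, under the assumption that the enum ordering mirrors rocksdb::InfoLogLevel (more verbose levels have smaller values); this is an illustration, not RocksDB or MyRocks code:

```cpp
// Minimal sketch of the level arithmetic above, outside of RocksDB/MySQL:
// the "base" level (what RocksDB checks before calling Logv) must be the
// more verbose (numerically lower) of the MySQL log level and the level of
// the optional secondary logger, otherwise messages the secondary logger
// still wants would be filtered out before Rdb_logger ever sees them.
#include <algorithm>
#include <cassert>

// Mirrors the ordering of rocksdb::InfoLogLevel (DEBUG is most verbose).
enum LogLevel { DEBUG_LEVEL = 0, INFO_LEVEL, WARN_LEVEL, ERROR_LEVEL };

LogLevel effective_base_level(LogLevel mysql_level, bool has_secondary,
                              LogLevel secondary_level) {
  LogLevel base = mysql_level;
  if (has_secondary)
    base = std::min(base, secondary_level);  // keep the more verbose of the two
  return base;
}

int main() {
  // MySQL only wants errors, but the secondary logger wants INFO and up:
  // the base level must drop to INFO so those messages reach Logv at all.
  assert(effective_base_level(ERROR_LEVEL, true, INFO_LEVEL) == INFO_LEVEL);
  // With no secondary logger, the MySQL level is the base level.
  assert(effective_base_level(WARN_LEVEL, false, DEBUG_LEVEL) == WARN_LEVEL);
  return 0;
}
```

Keeping the base level at the minimum ensures RocksDB still invokes Logv for messages that only the secondary logger wants, while m_mysql_log_level keeps the MySQL error log at its own threshold.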
@@ -5,7 +5,7 @@ USE mysqlslap;
CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=rocksdb;
# 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 1000 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
@@ -18,7 +18,7 @@ case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true' else '
false
# 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
@@ -29,7 +29,7 @@ case when variable_value-@c = 0 then 'true' else 'false' end
true
# 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
select case when variable_value-@c = 0 then 'true' else 'false' end from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
case when variable_value-@c = 0 then 'true' else 'false' end
@@ -39,6 +39,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
case when variable_value-@c = 0 then 'true' else 'false' end
false
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
DROP TABLE t1;
DROP DATABASE mysqlslap;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed';
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
SET debug_sync= 'now SIGNAL flushed';
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 5 # # # # # 5
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 5 # # # # # 5,5
SET debug_sync='RESET';
DROP TABLE t1;
include/master-slave.inc
Warnings:
Note #### Sending passwords in plain text without SSL/TLS is extremely insecure.
Note #### Storing MySQL user name or password information in the master info repository is not secure and is therefore not recommended. Please consider using the USER and PASSWORD connection options for START SLAVE; see the 'START SLAVE Syntax' in the MySQL Manual for more information.
[connection master]
set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key;
set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api;
DROP TABLE IF EXISTS t1,t2;
create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb;
create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
SET session rocksdb_blind_delete_primary_key=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
1000
SELECT count(*) FROM t1;
count(*)
9000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
9000
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
0
SELECT count(*) FROM t2;
count(*)
9000
SET session rocksdb_master_skip_tx_api=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
1000
SELECT count(*) FROM t1;
count(*)
8000
SELECT count(*) FROM t2;
count(*)
8000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
8000
SELECT count(*) FROM t2;
count(*)
8000
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000;
DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000;
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
variable_value-@c
0
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
7000
SELECT count(*) FROM t2;
count(*)
7000
DELETE FROM t1 WHERE id = 10;
SELECT count(*) FROM t1;
count(*)
7000
call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*");
call mtr.add_suppression("Slave: Can't find record in 't1'.*");
include/wait_for_slave_sql_error.inc [errno=1032]
set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables;
set global rocksdb_read_free_rpl_tables="t.*";
START SLAVE;
include/sync_slave_sql_with_master.inc
SELECT count(*) FROM t1;
count(*)
7000
set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables;
SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key;
SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api;
DROP TABLE t1, t2;
include/rpl_end.inc
DROP TABLE IF EXISTS t1, t2;
DROP TABLE IF EXISTS t1, t2, t3;
CREATE TABLE t1(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
CREATE TABLE t2(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin';
CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE 'latin1_bin'
@@ -19,9 +19,9 @@ LOAD DATA INFILE <input_file> INTO TABLE t3;
set rocksdb_bulk_load=0;
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
test.t1 analyze status OK
@@ -29,36 +29,36 @@ test.t2 analyze status OK
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 10000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
t1 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t2 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL
t3 ROCKSDB 10 Fixed 5000000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned
select count(pk) from t1;
count(pk)
10000000
5000000
select count(a) from t1;
count(a)
10000000
5000000
select count(b) from t1;
count(b)
10000000
5000000
select count(pk) from t2;
count(pk)
10000000
5000000
select count(a) from t2;
count(a)
10000000
5000000
select count(b) from t2;
count(b)
10000000
5000000
select count(pk) from t3;
count(pk)
10000000
5000000
select count(a) from t3;
count(a)
10000000
5000000
select count(b) from t3;
count(b)
10000000
5000000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
DROP TABLE t1, t2, t3;
@@ -125,4 +125,5 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro
CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE abc;
SET GLOBAL rocksdb_strict_collation_exceptions=null;
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (i INT, PRIMARY KEY (i) COMMENT '__system__') ENGINE = ROCKSDB;
ERROR HY000: Incorrect arguments to column family not valid for storing index data
ERROR HY000: Incorrect arguments to column family not valid for storing index data.
DROP TABLE IF EXISTS t1;
@@ -66,7 +66,7 @@ Handler_read_prev 0
Handler_read_rnd 0
Handler_read_rnd_next 10
FLUSH STATUS;
SELECT * FROM t1 WHERE b <=5 ORDER BY b;
SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b;
id a b
4 NULL 4
5 NULL 5
@@ -22,7 +22,7 @@ insert into linktable (id1, link_type, id2) values (2, 1, 7);
insert into linktable (id1, link_type, id2) values (2, 1, 8);
insert into linktable (id1, link_type, id2) values (2, 1, 9);
insert into linktable (id1, link_type, id2) values (2, 1, 10);
explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL # Using where
1 SIMPLE linktable range PRIMARY PRIMARY 24 NULL # Using where
drop table linktable;
drop table if exists t;
Warnings:
Note 1051 Unknown table 'test.t'
create table t (
a int,
b int,
c varchar(12249) collate latin1_bin,
d datetime,
e int,
f int,
g blob,
h int,
i int,
key (b,e),
key (h,b)
) engine=rocksdb
partition by linear hash (i) partitions 8 ;
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
select i from t group by h;
i
1
select i from t group by h;
i
1
drop table t;
@@ -124,6 +124,51 @@ UNLOCK TABLES;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
2
==== mysqldump with --innodb-stats-on-metadata ====
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8 */;
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
/*!40103 SET TIME_ZONE='+00:00' */;
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=3893;
DROP TABLE IF EXISTS `r1`;
/*!40101 SET @saved_cs_client = @@character_set_client */;
/*!40101 SET character_set_client = utf8 */;
CREATE TABLE `r1` (
`id1` int(11) NOT NULL DEFAULT '0',
`id2` int(11) NOT NULL DEFAULT '0',
`id3` varchar(100) NOT NULL DEFAULT '',
`id4` int(11) NOT NULL DEFAULT '0',
`value1` int(11) DEFAULT NULL,
`value2` int(11) DEFAULT NULL,
`value3` int(11) DEFAULT NULL,
`value4` int(11) DEFAULT NULL,
PRIMARY KEY (`id1`,`id2`,`id3`,`id4`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1;
/*!40101 SET character_set_client = @saved_cs_client */;
/* ORDERING KEY : (null) */;
LOCK TABLES `r1` WRITE;
/*!40000 ALTER TABLE `r1` DISABLE KEYS */;
INSERT INTO `r1` VALUES (1,1,'1',1,1,1,1,1),(1,1,'1',2,2,2,2,2),(1,1,'2',1,3,3,3,3),(1,1,'2',2,4,4,4,4),(1,2,'1',1,5,5,5,5),(1,2,'1',2,6,6,6,6),(1,2,'2',1,7,7,7,7),(1,2,'2',2,8,8,8,8),(2,1,'1',1,9,9,9,9),(2,1,'1',2,10,10,10,10),(2,1,'2',1,11,11,11,11),(2,1,'2',2,12,12,12,12),(2,2,'1',1,13,13,13,13),(2,2,'1',2,14,14,14,14),(2,2,'2',1,15,15,15,15),(2,2,'2',2,16,16,16,16);
/*!40000 ALTER TABLE `r1` ENABLE KEYS */;
UNLOCK TABLES;
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
SET GLOBAL binlog_format=statement;
SET GLOBAL binlog_format=row;
drop table r1;
@@ -864,6 +864,7 @@ rocksdb_allow_mmap_reads OFF
rocksdb_allow_mmap_writes OFF
rocksdb_background_sync OFF
rocksdb_base_background_compactions 1
rocksdb_blind_delete_primary_key OFF
rocksdb_block_cache_size 536870912
rocksdb_block_restart_interval 16
rocksdb_block_size 4096
@@ -889,14 +890,16 @@ rocksdb_db_write_buffer_size 0
rocksdb_deadlock_detect OFF
rocksdb_debug_optimizer_no_zero_cardinality ON
rocksdb_default_cf_options
rocksdb_delayed_write_rate 16777216
rocksdb_delete_obsolete_files_period_micros 21600000000
rocksdb_disabledatasync OFF
rocksdb_enable_2pc ON
rocksdb_enable_bulk_load_api ON
rocksdb_enable_thread_tracking OFF
rocksdb_enable_write_thread_adaptive_yield OFF
rocksdb_error_if_exists OFF
rocksdb_flush_log_at_trx_commit 1
rocksdb_flush_memtable_on_analyze ON
rocksdb_force_compute_memtable_stats ON
rocksdb_force_flush_memtable_now OFF
rocksdb_force_index_records_in_range 0
rocksdb_hash_index_allow_collision ON
@@ -908,6 +911,7 @@ rocksdb_lock_scanned_rows OFF
rocksdb_lock_wait_timeout 1
rocksdb_log_file_time_to_roll 0
rocksdb_manifest_preallocation_size 4194304
rocksdb_master_skip_tx_api OFF
rocksdb_max_background_compactions 1
rocksdb_max_background_flushes 1
rocksdb_max_log_file_size 0
@@ -925,7 +929,7 @@ rocksdb_paranoid_checks ON
rocksdb_pause_background_work ON
rocksdb_perf_context_level 0
rocksdb_persistent_cache_path
rocksdb_persistent_cache_size 0
rocksdb_persistent_cache_size_mb 0
rocksdb_pin_l0_filter_and_index_blocks_in_cache ON
rocksdb_print_snapshot_conflict_queries OFF
rocksdb_rate_limiter_bytes_per_sec 0
@@ -953,25 +957,37 @@ rocksdb_validate_tables 1
rocksdb_verify_row_debug_checksums OFF
rocksdb_wal_bytes_per_sync 0
rocksdb_wal_dir
rocksdb_wal_recovery_mode 2
rocksdb_wal_recovery_mode 1
rocksdb_wal_size_limit_mb 0
rocksdb_wal_ttl_seconds 0
rocksdb_whole_key_filtering ON
rocksdb_write_disable_wal OFF
rocksdb_write_ignore_missing_column_families OFF
rocksdb_write_sync OFF
create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
insert into t47 values (1, 'row1');
insert into t47 values (2, 'row2');
set rocksdb_bulk_load=1;
insert into t47 values (3, 'row3'),(4, 'row4');
set rocksdb_bulk_load=0;
connect con1,localhost,root,,;
set rocksdb_bulk_load=1;
insert into t47 values (10, 'row10'),(11, 'row11');
connection default;
set rocksdb_bulk_load=1;
insert into t47 values (100, 'row100'),(101, 'row101');
disconnect con1;
connection default;
set rocksdb_bulk_load=0;
select * from t47;
pk col1
1 row1
2 row2
3 row3
4 row4
10 row10
11 row11
100 row100
101 row101
drop table t47;
#
# Fix TRUNCATE over empty table (transaction is committed when it wasn't
@@ -1410,6 +1426,7 @@ rocksdb_rows_deleted #
rocksdb_rows_inserted #
rocksdb_rows_read #
rocksdb_rows_updated #
rocksdb_rows_deleted_blind #
rocksdb_system_rows_deleted #
rocksdb_system_rows_inserted #
rocksdb_system_rows_read #
@@ -1482,6 +1499,7 @@ ROCKSDB_ROWS_DELETED
ROCKSDB_ROWS_INSERTED
ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
@@ -1556,6 +1574,7 @@ ROCKSDB_ROWS_DELETED
ROCKSDB_ROWS_INSERTED
ROCKSDB_ROWS_READ
ROCKSDB_ROWS_UPDATED
ROCKSDB_ROWS_DELETED_BLIND
ROCKSDB_SYSTEM_ROWS_DELETED
ROCKSDB_SYSTEM_ROWS_INSERTED
ROCKSDB_SYSTEM_ROWS_READ
@@ -1737,7 +1756,7 @@ key1 int,
PRIMARY KEY (id),
index (key1) comment 'test.t1.key1'
) engine=rocksdb;
ERROR HY000: Column Family Flag is different from existing flag. Assign a new CF flag, or do not change existing CF flag.
ERROR HY000: Column family ('test.t1.key1') flag (0) is different from an existing flag (2). Assign a new CF flag, or do not change existing CF flag.
create table t1_err (
id int not null,
key1 int,
@@ -1763,7 +1782,7 @@ key1 int,
PRIMARY KEY (id),
index (key1) comment '$per_idnex_cf'
)engine=rocksdb;
ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf'
ERROR 42000: This version of MySQL doesn't yet support 'column family name looks like a typo of $per_index_cf.'
#
# Issue #22: SELECT ... FOR UPDATE takes a long time
#
@@ -12,7 +12,6 @@ Type Name Status
DBSTATS rocksdb #
CF_COMPACTION __system__ #
CF_COMPACTION cf_t1 #
CF_COMPACTION cf_t4 #
CF_COMPACTION default #
CF_COMPACTION rev:cf_t2 #
Memory_Stats rocksdb #
@@ -48,15 +47,6 @@ cf_t1 NUM_ENTRIES_ACTIVE_MEM_TABLE #
cf_t1 NUM_ENTRIES_IMM_MEM_TABLES #
cf_t1 NON_BLOCK_CACHE_SST_MEM_USAGE #
cf_t1 NUM_LIVE_VERSIONS #
cf_t4 NUM_IMMUTABLE_MEM_TABLE #
cf_t4 MEM_TABLE_FLUSH_PENDING #
cf_t4 COMPACTION_PENDING #
cf_t4 CUR_SIZE_ACTIVE_MEM_TABLE #
cf_t4 CUR_SIZE_ALL_MEM_TABLES #
cf_t4 NUM_ENTRIES_ACTIVE_MEM_TABLE #
cf_t4 NUM_ENTRIES_IMM_MEM_TABLES #
cf_t4 NON_BLOCK_CACHE_SST_MEM_USAGE #
cf_t4 NUM_LIVE_VERSIONS #
default NUM_IMMUTABLE_MEM_TABLE #
default MEM_TABLE_FLUSH_PENDING #
default COMPACTION_PENDING #
@@ -117,7 +107,6 @@ __system__ RATE_LIMIT_DELAY_MAX_MILLISECONDS #
__system__ ARENA_BLOCK_SIZE #
__system__ DISABLE_AUTO_COMPACTIONS #
__system__ PURGE_REDUNDANT_KVS_WHILE_FLUSH #
__system__ VERIFY_CHECKSUM_IN_COMPACTION #
__system__ MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
__system__ MEMTABLE_FACTORY #
__system__ INPLACE_UPDATE_SUPPORT #
@@ -126,7 +115,6 @@ __system__ MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
__system__ MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
__system__ BLOOM_LOCALITY #
__system__ MAX_SUCCESSIVE_MERGES #
__system__ MIN_PARTIAL_MERGE_OPERANDS #
__system__ OPTIMIZE_FILTERS_FOR_HITS #
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
__system__ COMPRESSION_TYPE #
@@ -173,7 +161,6 @@ cf_t1 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
cf_t1 ARENA_BLOCK_SIZE #
cf_t1 DISABLE_AUTO_COMPACTIONS #
cf_t1 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
cf_t1 VERIFY_CHECKSUM_IN_COMPACTION #
cf_t1 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
cf_t1 MEMTABLE_FACTORY #
cf_t1 INPLACE_UPDATE_SUPPORT #
@@ -182,7 +169,6 @@ cf_t1 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
cf_t1 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
cf_t1 BLOOM_LOCALITY #
cf_t1 MAX_SUCCESSIVE_MERGES #
cf_t1 MIN_PARTIAL_MERGE_OPERANDS #
cf_t1 OPTIMIZE_FILTERS_FOR_HITS #
cf_t1 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
cf_t1 COMPRESSION_TYPE #
@@ -206,62 +192,6 @@ cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
cf_t1 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
cf_t1 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
cf_t4 COMPARATOR #
cf_t4 MERGE_OPERATOR #
cf_t4 COMPACTION_FILTER #
cf_t4 COMPACTION_FILTER_FACTORY #
cf_t4 WRITE_BUFFER_SIZE #
cf_t4 MAX_WRITE_BUFFER_NUMBER #
cf_t4 MIN_WRITE_BUFFER_NUMBER_TO_MERGE #
cf_t4 NUM_LEVELS #
cf_t4 LEVEL0_FILE_NUM_COMPACTION_TRIGGER #
cf_t4 LEVEL0_SLOWDOWN_WRITES_TRIGGER #
cf_t4 LEVEL0_STOP_WRITES_TRIGGER #
cf_t4 MAX_MEM_COMPACTION_LEVEL #
cf_t4 TARGET_FILE_SIZE_BASE #
cf_t4 TARGET_FILE_SIZE_MULTIPLIER #
cf_t4 MAX_BYTES_FOR_LEVEL_BASE #
cf_t4 LEVEL_COMPACTION_DYNAMIC_LEVEL_BYTES #
cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER #
cf_t4 SOFT_RATE_LIMIT #
cf_t4 HARD_RATE_LIMIT #
cf_t4 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
cf_t4 ARENA_BLOCK_SIZE #
cf_t4 DISABLE_AUTO_COMPACTIONS #
cf_t4 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
cf_t4 VERIFY_CHECKSUM_IN_COMPACTION #
cf_t4 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
cf_t4 MEMTABLE_FACTORY #
cf_t4 INPLACE_UPDATE_SUPPORT #
cf_t4 INPLACE_UPDATE_NUM_LOCKS #
cf_t4 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
cf_t4 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
cf_t4 BLOOM_LOCALITY #
cf_t4 MAX_SUCCESSIVE_MERGES #
cf_t4 MIN_PARTIAL_MERGE_OPERANDS #
cf_t4 OPTIMIZE_FILTERS_FOR_HITS #
cf_t4 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
cf_t4 COMPRESSION_TYPE #
cf_t4 COMPRESSION_PER_LEVEL #
cf_t4 COMPRESSION_OPTS #
cf_t4 BOTTOMMOST_COMPRESSION #
cf_t4 PREFIX_EXTRACTOR #
cf_t4 COMPACTION_STYLE #
cf_t4 COMPACTION_OPTIONS_UNIVERSAL #
cf_t4 COMPACTION_OPTION_FIFO::MAX_TABLE_FILES_SIZE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
cf_t4 BLOCK_BASED_TABLE_FACTORY::INDEX_TYPE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
cf_t4 BLOCK_BASED_TABLE_FACTORY::CHECKSUM #
cf_t4 BLOCK_BASED_TABLE_FACTORY::NO_BLOCK_CACHE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::FILTER_POLICY #
cf_t4 BLOCK_BASED_TABLE_FACTORY::WHOLE_KEY_FILTERING #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_CACHE_COMPRESSED #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_SIZE_DEVIATION #
cf_t4 BLOCK_BASED_TABLE_FACTORY::BLOCK_RESTART_INTERVAL #
cf_t4 BLOCK_BASED_TABLE_FACTORY::FORMAT_VERSION #
default COMPARATOR #
default MERGE_OPERATOR #
default COMPACTION_FILTER #
@@ -285,7 +215,6 @@ default RATE_LIMIT_DELAY_MAX_MILLISECONDS #
default ARENA_BLOCK_SIZE #
default DISABLE_AUTO_COMPACTIONS #
default PURGE_REDUNDANT_KVS_WHILE_FLUSH #
default VERIFY_CHECKSUM_IN_COMPACTION #
default MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
default MEMTABLE_FACTORY #
default INPLACE_UPDATE_SUPPORT #
@@ -294,7 +223,6 @@ default MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
default MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
default BLOOM_LOCALITY #
default MAX_SUCCESSIVE_MERGES #
default MIN_PARTIAL_MERGE_OPERANDS #
default OPTIMIZE_FILTERS_FOR_HITS #
default MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
default COMPRESSION_TYPE #
@@ -341,7 +269,6 @@ rev:cf_t2 RATE_LIMIT_DELAY_MAX_MILLISECONDS #
rev:cf_t2 ARENA_BLOCK_SIZE #
rev:cf_t2 DISABLE_AUTO_COMPACTIONS #
rev:cf_t2 PURGE_REDUNDANT_KVS_WHILE_FLUSH #
rev:cf_t2 VERIFY_CHECKSUM_IN_COMPACTION #
rev:cf_t2 MAX_SEQUENTIAL_SKIP_IN_ITERATIONS #
rev:cf_t2 MEMTABLE_FACTORY #
rev:cf_t2 INPLACE_UPDATE_SUPPORT #
@@ -350,7 +277,6 @@ rev:cf_t2 MEMTABLE_PREFIX_BLOOM_BITS_RATIO #
rev:cf_t2 MEMTABLE_PREFIX_BLOOM_HUGE_PAGE_TLB_SIZE #
rev:cf_t2 BLOOM_LOCALITY #
rev:cf_t2 MAX_SUCCESSIVE_MERGES #
rev:cf_t2 MIN_PARTIAL_MERGE_OPERANDS #
rev:cf_t2 OPTIMIZE_FILTERS_FOR_HITS #
rev:cf_t2 MAX_BYTES_FOR_LEVEL_MULTIPLIER_ADDITIONAL #
rev:cf_t2 COMPRESSION_TYPE #
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '<DATA_DIR>' INDEX DIRECTORY = '<INDEX_DIR>';
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` char(8) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
ALTER TABLE t1 INDEX DIRECTORY = '<DATA_DIR>';
Warnings:
Warning 1618 <INDEX DIRECTORY> option ignored
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) NOT NULL,
`b` char(8) DEFAULT NULL,
PRIMARY KEY (`a`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb DATA DIRECTORY = '/foo/bar/data';
ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
CREATE TABLE t1 (a INT PRIMARY KEY, b CHAR(8)) ENGINE=rocksdb INDEX DIRECTORY = '/foo/bar/index';
ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
CREATE TABLE t1 (id INT NOT NULL PRIMARY KEY) ENGINE=rocksdb PARTITION BY RANGE (id)
(
PARTITION P0 VALUES LESS THAN (1000)
DATA DIRECTORY = '/foo/bar/data/',
PARTITION P1 VALUES LESS THAN (2000)
DATA DIRECTORY = '/foo/bar/data/',
PARTITION P2 VALUES LESS THAN (MAXVALUE)
);
ERROR HY000: Got error 197 'Specifying DATA DIRECTORY for an individual table is not supported.' from ROCKSDB
CREATE TABLE t1 (id int not null primary key) ENGINE=rocksdb PARTITION BY RANGE (id)
(
PARTITION P0 VALUES LESS THAN (1000)
INDEX DIRECTORY = '/foo/bar/data/',
PARTITION P1 VALUES LESS THAN (2000)
INDEX DIRECTORY = '/foo/bar/data/',
PARTITION P2 VALUES LESS THAN (MAXVALUE)
);
ERROR HY000: Got error 198 'Specifying INDEX DIRECTORY for an individual table is not supported.' from ROCKSDB
@@ -70,3 +70,15 @@ id id2 value
1 1 1
set debug_sync='RESET';
drop table t1, t2;
drop table if exists t1,t2,t3;
create table t1 (id int, value int, primary key (id)) engine=rocksdb;
create table t2 (id int, id2 int, value int, primary key (id), unique key (id2)) engine=rocksdb;
create table t3 (id int, value int) engine=rocksdb;
SET @old_val = @@session.unique_checks;
set @@session.unique_checks = FALSE;
insert into t1 values (1, 1), (1, 2);
insert into t2 values (1, 1, 1), (1, 2, 1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
insert into t3 values (1, 1), (1, 1);
set @@session.unique_checks = @old_val;
drop table t1, t2, t3;
SET GLOBAL rocksdb_write_disable_wal=false;
SET GLOBAL rocksdb_write_ignore_missing_column_families=true;
create table aaa (id int primary key, i int) engine rocksdb;
SET LOCAL rocksdb_write_sync=off;
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @a from information_schema.global_status where variable_name='rocksdb_wal_synced';
insert aaa(id, i) values(1,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
@@ -15,7 +15,7 @@ insert aaa(id, i) values(3,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
variable_value-@a
0
SET LOCAL rocksdb_write_sync=1;
SET LOCAL rocksdb_flush_log_at_trx_commit=1;
insert aaa(id, i) values(4,1);
select variable_value-@a from information_schema.global_status where variable_name='rocksdb_wal_synced';
variable_value-@a
@@ -29,11 +29,11 @@ select variable_value-@a from information_schema.global_status where variable_na
variable_value-@a
3
SET GLOBAL rocksdb_background_sync=on;
SET LOCAL rocksdb_write_sync=off;
SET LOCAL rocksdb_flush_log_at_trx_commit=0;
insert aaa(id, i) values(7,1);
truncate table aaa;
drop table aaa;
SET GLOBAL rocksdb_write_sync=off;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
SET GLOBAL rocksdb_write_disable_wal=false;
SET GLOBAL rocksdb_write_ignore_missing_column_families=false;
SET GLOBAL rocksdb_background_sync=off;
@@ -14,7 +14,7 @@ CREATE TABLE t1(id BIGINT AUTO_INCREMENT, value BIGINT, PRIMARY KEY(id)) ENGINE=
--echo # 2PC enabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
--echo ## 2PC + durability + single thread
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
@@ -29,7 +29,7 @@ select case when variable_value-@c > 0 and variable_value-@c < 10000 then 'true'
--echo # 2PC enabled, MyRocks durability disabled
SET GLOBAL rocksdb_enable_2pc=0;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=0;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
@@ -42,7 +42,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
--echo # 2PC disabled, MyRocks durability enabled
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=1;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_wal_group_syncs';
--exec $MYSQL_SLAP --silent --concurrency=1 --number-of-queries=1000 --query="INSERT INTO t1 (id, value) VALUES(NULL, 1)"
@@ -59,6 +59,6 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
SET GLOBAL rocksdb_enable_2pc=1;
SET GLOBAL rocksdb_write_sync=0;
SET GLOBAL rocksdb_flush_log_at_trx_commit=1;
DROP TABLE t1;
DROP DATABASE mysqlslap;
@@ -341,5 +341,3 @@ while ($i <= $max) {
#SHOW TABLE STATUS WHERE name LIKE 't1';
DROP TABLE t1;
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
# Test that fast secondary index creation updates cardinality properly
CREATE TABLE t1 (i INT PRIMARY KEY, j INT) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1,2), (2,4), (3,6), (4,8), (5,10);
SET debug_sync= 'rocksdb.commit_in_place_alter_table WAIT_FOR flushed';
send ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
connect (con1,localhost,root,,);
# Flush memtable out to SST
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
SET debug_sync= 'now SIGNAL flushed';
connection default;
reap;
# Return the data for the primary key of t1
--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "PRIMARY");
# Return the data for the secondary index of t1
--replace_column 1 # 2 # 3 SSTNAME 5 # 6 # 7 # 8 # 9 #
SELECT * FROM INFORMATION_SCHEMA.ROCKSDB_INDEX_FILE_MAP
WHERE INDEX_NUMBER =
(SELECT INDEX_NUMBER FROM INFORMATION_SCHEMA.ROCKSDB_DDL
WHERE TABLE_NAME = 't1' AND INDEX_NAME = "kj");
disconnect con1;
SET debug_sync='RESET';
# cleanup
DROP TABLE t1;
!include suite/rpl/my.cnf
[mysqld.1]
sync_binlog=0
binlog_format=row
slave-exec-mode=strict
[mysqld.2]
sync_binlog=0
binlog_format=row
slave-exec-mode=strict
--source include/have_rocksdb.inc
source include/master-slave.inc;
connection master;
set @save_rocksdb_blind_delete_primary_key=@@session.rocksdb_blind_delete_primary_key;
set @save_rocksdb_master_skip_tx_api=@@session.rocksdb_master_skip_tx_api;
--disable_warnings
DROP TABLE IF EXISTS t1,t2;
--enable_warnings
create table t1 (id int primary key, value int, value2 varchar(200)) engine=rocksdb;
create table t2 (id int primary key, value int, value2 varchar(200), index(value)) engine=rocksdb;
--disable_query_log
let $t = 1;
while ($t <= 2) {
let $i = 1;
while ($i <= 10000) {
let $insert = INSERT INTO t$t VALUES($i, $i, REPEAT('x', 150));
inc $i;
eval $insert;
}
inc $t;
}
--enable_query_log
SET session rocksdb_blind_delete_primary_key=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
# Deleting 1000 rows from t1
--disable_query_log
let $i = 1;
while ($i <= 1000) {
let $insert = DELETE FROM t1 WHERE id=$i;
inc $i;
eval $insert;
}
--enable_query_log
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t1;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
connection master;
# Deleting 1000 rows from t2 (blind delete disabled because of secondary key)
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
--disable_query_log
let $i = 1;
while ($i <= 1000) {
let $insert = DELETE FROM t2 WHERE id=$i;
inc $i;
eval $insert;
}
--enable_query_log
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t2;
SET session rocksdb_master_skip_tx_api=1;
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
--disable_query_log
let $t = 1;
while ($t <= 2) {
let $i = 1001;
while ($i <= 2000) {
let $insert = DELETE FROM t$t WHERE id=$i;
inc $i;
eval $insert;
}
inc $t;
}
--enable_query_log
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
connection master;
# Range Deletes (blind delete disabled)
select variable_value into @c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
DELETE FROM t1 WHERE id BETWEEN 3001 AND 4000;
DELETE FROM t2 WHERE id BETWEEN 3001 AND 4000;
select variable_value-@c from information_schema.global_status where variable_name='rocksdb_rows_deleted_blind';
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
SELECT count(*) FROM t2;
connection master;
# Deleting the same key again (slave SQL thread stops)
DELETE FROM t1 WHERE id = 10;
SELECT count(*) FROM t1;
connection slave;
call mtr.add_suppression("Slave SQL.*Could not execute Delete_rows event on table test.t1.*Error_code.*");
call mtr.add_suppression("Slave: Can't find record in 't1'.*");
# wait until we have the expected error
--let $slave_sql_errno= convert_error(ER_KEY_NOT_FOUND)
--source include/wait_for_slave_sql_error.inc
connection slave;
set @save_rocksdb_read_free_rpl_tables=@@global.rocksdb_read_free_rpl_tables;
set global rocksdb_read_free_rpl_tables="t.*";
START SLAVE;
connection master;
--source include/sync_slave_sql_with_master.inc
connection slave;
SELECT count(*) FROM t1;
connection master;
# cleanup
connection slave;
set global rocksdb_read_free_rpl_tables=@save_rocksdb_read_free_rpl_tables;
connection master;
SET session rocksdb_blind_delete_primary_key=@save_rocksdb_blind_delete_primary_key;
SET session rocksdb_master_skip_tx_api=@save_rocksdb_master_skip_tx_api;
DROP TABLE t1, t2;
--source include/rpl_end.inc
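The comments in the test above imply when a blind delete can fire: only for point deletes by primary key, only when the table has no secondary indexes, and only when rocksdb_blind_delete_primary_key is enabled; range deletes and tables like t2 fall back to the usual read-then-delete path, and re-deleting a missing row makes the slave stop unless read-free replication is allowed. The following is a rough, clearly hypothetical sketch of that decision, not the actual is_blind_delete_enabled() implementation declared in ha_rocksdb.h:

```cpp
// Rough sketch (an assumption, not the actual MyRocks code) of the kind of
// check the comments in this test imply: blind deletes only fire for
// single-row DELETEs by primary key on tables with no secondary keys, and
// only when the session variable enables them.
#include <cstdint>

struct SessionVars {
  bool rocksdb_blind_delete_primary_key;
};

struct TableInfo {
  uint32_t key_count;       // total number of indexes, including the PK
  bool delete_is_point_pk;  // DELETE resolves to exactly one PK value
};

bool blind_delete_applies(const SessionVars &vars, const TableInfo &t) {
  // A range DELETE, or any secondary index, forces the usual read-then-delete
  // path so index entries stay consistent and missing rows are detected.
  return vars.rocksdb_blind_delete_primary_key && t.key_count == 1 &&
         t.delete_is_point_pk;
}

int main() {
  SessionVars vars{true};
  TableInfo t1{1, true};  // like t1: PK only, DELETE ... WHERE id=<const>
  TableInfo t2{2, true};  // like t2: has a secondary index on `value`
  return blind_delete_applies(vars, t1) && !blind_delete_applies(vars, t2)
             ? 0
             : 1;
}
```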
--source include/have_rocksdb.inc
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
DROP TABLE IF EXISTS t1, t2, t3;
--enable_warnings
# Create a table with a primary key and one secondary key as well as one
@@ -25,7 +25,7 @@ CREATE TABLE t3(pk CHAR(5) PRIMARY KEY, a char(30), b char(30), key(a)) COLLATE
perl;
my $fn = $ENV{'ROCKSDB_INFILE'};
open(my $fh, '>>', $fn) || die "perl open($fn): $!";
my $max = 10000000;
my $max = 5000000;
my @chars = ("A".."Z", "a".."z", "0".."9");
my @lowerchars = ("a".."z");
my @powers_of_26 = (26 * 26 * 26 * 26, 26 * 26 * 26, 26 * 26, 26, 1);
@@ -177,5 +177,8 @@ CREATE TABLE abc (id INT PRIMARY KEY, value varchar(50), index(value)) engine=ro
CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=rocksdb charset utf8;
DROP TABLE abc;
# test bad regex (null caused a crash) - Issue 493
SET GLOBAL rocksdb_strict_collation_exceptions=null;
# cleanup
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
@@ -37,7 +37,7 @@ SELECT * FROM t1;
SHOW SESSION STATUS LIKE 'Handler_read%';
FLUSH STATUS;
SELECT * FROM t1 WHERE b <=5 ORDER BY b;
SELECT * FROM t1 FORCE INDEX(b) WHERE b <=5 ORDER BY b;
SHOW SESSION STATUS LIKE 'Handler_read%';
FLUSH STATUS;
@@ -50,4 +50,3 @@ SHOW SESSION STATUS LIKE 'Handler_read%';
# Cleanup
DROP TABLE t1;
@@ -35,6 +35,6 @@ insert into linktable (id1, link_type, id2) values (2, 1, 9);
insert into linktable (id1, link_type, id2) values (2, 1, 10);
--replace_column 9 #
explain select id1, id2, link_type, data from linktable where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
explain select id1, id2, link_type, data from linktable force index(primary) where id1=2 and link_type=1 and (id2=1 or id2=2 or id2=3 or id2=4 or id2=5);
drop table linktable;
drop table if exists t;
create table t (
a int,
b int,
c varchar(12249) collate latin1_bin,
d datetime,
e int,
f int,
g blob,
h int,
i int,
key (b,e),
key (h,b)
) engine=rocksdb
partition by linear hash (i) partitions 8 ;
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
insert into t values (1,1,'a',now(),1,1,'a',1,1);
select i from t group by h;
select i from t group by h;
drop table t;
@@ -51,6 +51,9 @@ SET GLOBAL default_storage_engine=rocksdb;
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key test
--exec grep "START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT" $MYSQLTEST_VARDIR/mysqld.1/mysqld.log | wc -l
# Sanity test mysqldump when the --innodb-stats-on-metadata option is specified (no effect)
--echo ==== mysqldump with --innodb-stats-on-metadata ====
--exec $MYSQL_DUMP --skip-comments --single-transaction --master-data=2 --print-ordering-key --innodb-stats-on-metadata test
# wiping general log so that this test case doesn't fail with --repeat
--exec echo "" > $MYSQLTEST_VARDIR/mysqld.1/mysqld.log
@@ -11,7 +11,7 @@ DROP TABLE IF EXISTS t1;
# restart server with correct parameters
shutdown_server 10;
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name
--sleep 5
--enable_reconnect
--source include/wait_until_connected_again.inc
@@ -29,7 +29,7 @@ select * from t1 where a = 1;
# restart server to re-read cache
--exec echo "wait" >$_expect_file_name
shutdown_server 10;
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size=1000000000" >$_expect_file_name
--exec echo "restart:--rocksdb_persistent_cache_path=$_cache_file_name --rocksdb_persistent_cache_size_mb=100" >$_expect_file_name
--sleep 5
--enable_reconnect
--source include/wait_until_connected_again.inc
@@ -792,6 +792,20 @@ insert into t47 values (2, 'row2');
set rocksdb_bulk_load=1;
insert into t47 values (3, 'row3'),(4, 'row4');
set rocksdb_bulk_load=0;
# Check concurrent bulk loading
--connect (con1,localhost,root,,)
set rocksdb_bulk_load=1;
insert into t47 values (10, 'row10'),(11, 'row11');
--connection default
set rocksdb_bulk_load=1;
insert into t47 values (100, 'row100'),(101, 'row101');
--disconnect con1
--connection default
set rocksdb_bulk_load=0;
--disable_query_log
let $wait_condition = select count(*) = 8 as c from t47;
--source include/wait_condition.inc
--enable_query_log
select * from t47;
drop table t47;
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates
--gtid_mode=ON --enforce_gtid_consistency --log_slave_updates --rocksdb_enable_2pc=OFF
--sync_binlog=1000 --relay_log_recovery=1
--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_write_sync=ON --rocksdb_write_disable_wal=OFF
--gtid_mode=ON --enforce_gtid_consistency --log_bin --log_slave_updates --rocksdb_flush_log_at_trx_commit=1 --rocksdb_write_disable_wal=OFF