Commit 865831c6 authored by Sergei Petrunia

Merge branch 'bb-10.2-mariarocks-merge' into 10.2

parents c8a3c2bc 52e0dee0
......@@ -18,6 +18,11 @@
# Optionally, SEARCH_ABORT can be set to "FOUND" or "NOT FOUND" and this
# will abort if the search result doesn't match the requested one.
#
# Optionally, SEARCH_OUTPUT can be set to control the format of the output.
# Supported formats:
# - (default) : "FOUND n /pattern/ in FILE " or "NOT FOUND ..."
# - "matches" : Each match is printed on a separate line
#
# In case of
# - SEARCH_FILE and/or SEARCH_PATTERN is not set
# - SEARCH_FILE cannot be opened
......@@ -75,7 +80,14 @@ perl;
my @matches=($content =~ m/$search_pattern/gs);
my $res=@matches ? "FOUND " . scalar(@matches) : "NOT FOUND";
$ENV{SEARCH_FILE} =~ s{^.*?([^/\\]+)$}{$1};
print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n";
if ($ENV{SEARCH_OUTPUT} eq "matches") {
foreach (@matches) {
print $_ . "\n";
}
} else {
print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n";
}
die "$ENV{SEARCH_ABORT}\n"
if $ENV{SEARCH_ABORT} && $res =~ /^$ENV{SEARCH_ABORT}/;
EOF
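For reference, a caller would typically drive these options from a test file along the following lines (a minimal sketch: the include path is assumed to be include/search_pattern_in_file.inc as in the MariaDB test framework, and the log path and pattern are illustrative placeholders, not taken from this commit):
# Print each match on its own line instead of the "FOUND n /pattern/ in FILE" summary
--let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err
--let SEARCH_PATTERN= table records had checksums
--let SEARCH_OUTPUT= matches
--source include/search_pattern_in_file.inc
# SEARCH_OUTPUT persists as an environment variable, so clear it for later callers
--let SEARCH_OUTPUT=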
......@@ -43,6 +43,15 @@ IF (WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 4)
SKIP_ROCKSDB_PLUGIN("32-bit Windows builds are temporarily disabled")
ENDIF()
#
# RocksDB's port/win/env_win.cc uses parts of the Windows API that are not
# part of the LEAN_AND_MEAN set. Ideally we would undef LEAN_AND_MEAN only for
# that file, but REMOVE_DEFINITIONS appears to work only per directory.
#
IF (WIN32)
REMOVE_DEFINITIONS(-DWIN32_LEAN_AND_MEAN)
ENDIF()
# This plugin needs a recent C++ compiler (it uses C++11 features)
# Skip the build for old compilers
SET(CXX11_FLAGS)
......@@ -155,6 +164,9 @@ if (UNIX AND NOT APPLE)
endif()
TARGET_LINK_LIBRARIES(rocksdb rocksdb_aux_lib)
FIND_LIBRARY(LZ4_LIBRARY
NAMES liblz4${PIC_EXT}.a lz4
HINTS ${WITH_LZ4}/lib)
IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
......@@ -182,8 +194,11 @@ IF(HAVE_SCHED_GETCPU)
ENDIF()
IF (WITH_TBB)
FIND_LIBRARY(TBB_LIBRARY
NAMES libtbb${PIC_EXT}.a tbb
HINTS ${WITH_TBB}/lib)
SET(rocksdb_static_libs ${rocksdb_static_libs}
${WITH_TBB}/lib/libtbb${PIC_EXT}.a)
${TBB_LIBRARY})
ADD_DEFINITIONS(-DTBB)
ENDIF()
......
......@@ -178,6 +178,7 @@ set(ROCKSDB_SOURCES
db/db_info_dumper.cc
db/db_iter.cc
db/dbformat.cc
db/error_handler.cc
db/event_helpers.cc
db/experimental.cc
db/external_sst_file_ingestion_job.cc
......@@ -188,6 +189,7 @@ set(ROCKSDB_SOURCES
db/internal_stats.cc
db/log_reader.cc
db/log_writer.cc
db/logs_with_prep_tracker.cc
db/malloc_stats.cc
db/managed_iterator.cc
db/memtable.cc
......@@ -284,6 +286,7 @@ set(ROCKSDB_SOURCES
util/coding.cc
util/compaction_job_stats_impl.cc
util/comparator.cc
util/compression_context_cache.cc
util/concurrent_arena.cc
util/crc32c.cc
util/delete_scheduler.cc
......@@ -304,6 +307,7 @@ set(ROCKSDB_SOURCES
util/status_message.cc
util/string_util.cc
util/sync_point.cc
util/sync_point_impl.cc
util/testutil.cc
util/thread_local.cc
util/threadpool_imp.cc
......@@ -352,6 +356,8 @@ set(ROCKSDB_SOURCES
utilities/transactions/transaction_util.cc
utilities/transactions/write_prepared_txn.cc
utilities/transactions/write_prepared_txn_db.cc
utilities/transactions/write_unprepared_txn.cc
utilities/transactions/write_unprepared_txn_db.cc
utilities/ttl/db_ttl_impl.cc
utilities/write_batch_with_index/write_batch_with_index.cc
utilities/write_batch_with_index/write_batch_with_index_internal.cc
......
......@@ -107,6 +107,7 @@ struct Rdb_deadlock_info {
std::string table_name;
};
std::vector <Rdb_dl_trx_info> path;
int64_t deadlock_time;
ulonglong victim_trx_id;
};
......@@ -147,6 +148,11 @@ const char *const BG_THREAD_NAME = "myrocks-bg";
*/
const char *const INDEX_THREAD_NAME = "myrocks-index";
/*
Name for the manual compaction thread.
*/
const char *const MANUAL_COMPACTION_THREAD_NAME = "myrocks-mc";
/*
Separator between partition name and the qualifier. Sample usage:
......@@ -503,6 +509,12 @@ class ha_rocksdb : public my_core::handler {
const rocksdb::Snapshot *m_scan_it_snapshot;
/* Buffers used for upper/lower bounds for m_scan_it. */
uchar *m_scan_it_lower_bound;
uchar *m_scan_it_upper_bound;
rocksdb::Slice m_scan_it_lower_bound_slice;
rocksdb::Slice m_scan_it_upper_bound_slice;
Rdb_tbl_def *m_tbl_def;
/* Primary Key encoder from KeyTupleFormat to StorageFormat */
......@@ -566,12 +578,6 @@ class ha_rocksdb : public my_core::handler {
uchar *m_dup_sk_packed_tuple;
uchar *m_dup_sk_packed_tuple_old;
/* Buffers used for passing upper/bound eq conditions. */
uchar *m_eq_cond_lower_bound;
uchar *m_eq_cond_upper_bound;
rocksdb::Slice m_eq_cond_lower_bound_slice;
rocksdb::Slice m_eq_cond_upper_bound_slice;
/*
Temporary space for packing VARCHARs (we provide it to
pack_record()/pack_index_tuple() calls).
......@@ -653,21 +659,20 @@ class ha_rocksdb : public my_core::handler {
enum ha_rkey_function find_flag) const
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
void setup_iterator_bounds(const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
uchar *lower_bound_buf,
uchar *upper_bound_buf,
rocksdb::Slice *out_lower_bound,
rocksdb::Slice *out_upper_bound);
const rocksdb::Slice &eq_cond, size_t bound_len,
uchar *const lower_bound, uchar *const upper_bound,
rocksdb::Slice *lower_bound_slice,
rocksdb::Slice *upper_bound_slice);
bool can_use_bloom_filter(THD *thd, const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
const bool use_all_keys);
bool check_bloom_and_set_bounds(THD *thd, const Rdb_key_def &kd,
const rocksdb::Slice &eq_cond,
const bool use_all_keys,
uchar *lower_bound_buf,
uchar *upper_bound_buf,
rocksdb::Slice *out_lower_bound,
rocksdb::Slice *out_upper_bound);
const bool use_all_keys, size_t bound_len,
uchar *const lower_bound,
uchar *const upper_bound,
rocksdb::Slice *lower_bound_slice,
rocksdb::Slice *upper_bound_slice);
void setup_scan_iterator(const Rdb_key_def &kd, rocksdb::Slice *slice,
const bool use_all_keys, const uint eq_cond_len)
MY_ATTRIBUTE((__nonnull__));
......@@ -1053,6 +1058,7 @@ class ha_rocksdb : public my_core::handler {
}
virtual double read_time(uint, uint, ha_rows rows) override;
virtual void print_error(int error, myf errflag) override;
int open(const char *const name, int mode, uint test_if_locked) override
MY_ATTRIBUTE((__warn_unused_result__));
......@@ -1167,8 +1173,8 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__nonnull__));
int compare_key_parts(const KEY *const old_key,
const KEY *const new_key) const;
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
const KEY *const new_key) const
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
int compare_keys(const KEY *const old_key, const KEY *const new_key) const
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
......@@ -1223,7 +1229,7 @@ class ha_rocksdb : public my_core::handler {
int update_pk(const Rdb_key_def &kd, const struct update_row_info &row_info,
const bool &pk_changed) MY_ATTRIBUTE((__warn_unused_result__));
int update_sk(const TABLE *const table_arg, const Rdb_key_def &kd,
const struct update_row_info &row_info)
const struct update_row_info &row_info, const bool bulk_load_sk)
MY_ATTRIBUTE((__warn_unused_result__));
int update_indexes(const struct update_row_info &row_info,
const bool &pk_changed)
......@@ -1277,7 +1283,9 @@ class ha_rocksdb : public my_core::handler {
int finalize_bulk_load(bool print_client_error = true)
MY_ATTRIBUTE((__warn_unused_result__));
public:
int calculate_stats_for_table() MY_ATTRIBUTE((__warn_unused_result__));
public:
int index_init(uint idx, bool sorted) override
MY_ATTRIBUTE((__warn_unused_result__));
int index_end() override MY_ATTRIBUTE((__warn_unused_result__));
......@@ -1370,9 +1378,6 @@ class ha_rocksdb : public my_core::handler {
MY_ATTRIBUTE((__warn_unused_result__));
int analyze(THD *const thd, HA_CHECK_OPT *const check_opt) override
MY_ATTRIBUTE((__warn_unused_result__));
int calculate_stats(const TABLE *const table_arg, THD *const thd,
HA_CHECK_OPT *const check_opt)
MY_ATTRIBUTE((__warn_unused_result__));
enum_alter_inplace_result check_if_supported_inplace_alter(
TABLE *altered_table,
......@@ -1402,7 +1407,7 @@ class ha_rocksdb : public my_core::handler {
virtual void rpl_after_delete_rows() override;
virtual void rpl_before_update_rows() override;
virtual void rpl_after_update_rows() override;
virtual bool use_read_free_rpl();
virtual bool use_read_free_rpl() override;
#endif // MARIAROCKS_NOT_YET
private:
......
......@@ -39,7 +39,12 @@ enum RDB_IO_ERROR_TYPE {
const char *get_rdb_io_error_string(const RDB_IO_ERROR_TYPE err_type);
void rdb_handle_io_error(const rocksdb::Status status,
const RDB_IO_ERROR_TYPE err_type);
const RDB_IO_ERROR_TYPE err_type)
#if defined(__clang__)
MY_ATTRIBUTE((optnone));
#else
MY_ATTRIBUTE((optimize("O0")));
#endif
int rdb_normalize_tablename(const std::string &tablename, std::string *str)
MY_ATTRIBUTE((__nonnull__, __warn_unused_result__));
......
[write_committed]
loose-rocksdb_write_policy=write_committed
[write_prepared]
loose-rocksdb_write_policy=write_prepared
loose-rocksdb_commit_time_batch_for_recovery=on
if (`select count(*) = 0 from information_schema.session_variables where variable_name = 'rocksdb_write_policy' and variable_value = 'write_committed';`) {
--skip Test requires write_committed policy
}
......@@ -299,11 +299,13 @@ connection con1;
show global variables like 'rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load ON
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
show session variables like 'rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load ON
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
CREATE TABLE t1 (i INT, j INT, PRIMARY KEY (i)) ENGINE = ROCKSDB;
......@@ -356,6 +358,7 @@ SET session rocksdb_merge_buf_size = 340;
show variables like 'rocksdb_bulk_load%';
Variable_name Value
rocksdb_bulk_load OFF
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
CREATE TABLE t1 (a VARCHAR(80)) ENGINE=RocksDB;
......@@ -463,3 +466,24 @@ t1 CREATE TABLE `t1` (
KEY `kb` (`b`(8))
) ENGINE=ROCKSDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin
DROP TABLE t1;
SET @prior_rocksdb_table_stats_sampling_pct = @@rocksdb_table_stats_sampling_pct;
set global rocksdb_table_stats_sampling_pct = 100;
CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 10);
INSERT INTO t1 (a, b) VALUES (2, 10);
INSERT INTO t1 (a, b) VALUES (3, 20);
INSERT INTO t1 (a, b) VALUES (4, 20);
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SHOW INDEX in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 4 NULL NULL LSMTREE
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
SHOW INDEX in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 4 NULL NULL LSMTREE
t1 1 kb 1 b A 2 NULL NULL YES LSMTREE
DROP TABLE t1;
SET global rocksdb_table_stats_sampling_pct = @prior_rocksdb_table_stats_sampling_pct;
......@@ -17,7 +17,7 @@ ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
set @tmp= @@rocksdb_max_row_locks;
set session rocksdb_max_row_locks=1000;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit
ERROR HY000: Got error 10 'Operation aborted: Failed to acquire lock due to max_num_locks limit' from ROCKSDB
set session rocksdb_bulk_load=1;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
set session rocksdb_bulk_load=0;
......
......@@ -59,12 +59,10 @@ insert into t values ();
set debug_dbug="+d,crash_commit_before";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 4
select max(i) from t;
max(i)
3
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
# After engine prepare
begin;
insert into t values ();
......@@ -72,12 +70,10 @@ insert into t values ();
set debug_dbug="+d,crash_commit_after_prepare";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 4
select max(i) from t;
max(i)
3
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
# After binlog
begin;
insert into t values ();
......@@ -85,12 +81,10 @@ insert into t values ();
set debug_dbug="+d,crash_commit_after_log";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 6
select max(i) from t;
max(i)
5
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
# After everything
begin;
insert into t values ();
......@@ -98,10 +92,8 @@ insert into t values ();
set debug_dbug="+d,crash_commit_after";
commit;
ERROR HY000: Lost connection to MySQL server during query
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
table_schema table_name auto_increment
test t 8
select max(i) from t;
max(i)
7
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
table_schema table_name auto_increment > @row_max
test t 1
drop table t;
......@@ -158,3 +158,21 @@ INSERT INTO t1 (a) VALUES (1);
UPDATE t1 SET pk = 3;
ALTER TABLE t1 AUTO_INCREMENT 2;
DROP TABLE t1;
#----------------------------------
# Issue #792 Crash in autoincrement
#----------------------------------
CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY,C2 CHAR) ENGINE=ROCKSDB;
INSERT INTO t1 VALUES(2177,0);
DROP TABLE t1;
CREATE TABLE t0(c0 BLOB) ENGINE=ROCKSDB;
INSERT INTO t0 VALUES(0);
ALTER TABLE t0 AUTO_INCREMENT=0;
DROP TABLE t0;
#----------------------------------
# Issue #869 Crash in autoincrement
#----------------------------------
CREATE TABLE t1 (pk INT AUTO_INCREMENT, a INT, PRIMARY KEY(pk)) ENGINE=RocksDB;
INSERT INTO t1 (a) VALUES (1);
UPDATE t1 SET pk = 3;
ALTER TABLE t1 AUTO_INCREMENT 2;
DROP TABLE t1;
create table r1 (id bigint primary key, value bigint) engine=rocksdb;
create table r2 (id bigint, value bigint, primary key (id) comment 'cf2') engine=rocksdb;
set session rocksdb_bulk_load=1;
set session rocksdb_bulk_load=0;
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r1 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
variable_value-@h
1
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r2 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
variable_value-@h
0
DROP TABLE r1, r2;
SET rocksdb_bulk_load_size=15;
CREATE TABLE t4 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t3 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t2 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t1 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
SET rocksdb_bulk_load=1;
INSERT INTO t1 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t1 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t1 FORCE INDEX (b);
count(*)
10
SELECT count(*) FROM t1 FORCE INDEX (c);
count(*)
10
SET rocksdb_bulk_load=0;
SELECT * FROM t1 FORCE INDEX (PRIMARY);
a b c
-9 11 11
-7 9 9
-5 7 7
-3 5 5
-1 3 3
2 0 0
4 -2 -2
6 -4 -4
8 -6 -6
10 -8 -8
SELECT b FROM t1 FORCE INDEX (b);
b
-8
-6
-4
-2
0
3
5
7
9
11
SELECT c FROM t1 FORCE INDEX (c);
c
-8
-6
-4
-2
0
3
5
7
9
11
Checksums should match
CHECKSUM TABLE t3;
Table Checksum
test.t3 3862424802
CHECKSUM TABLE t1;
Table Checksum
test.t1 3862424802
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t4 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t4 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t4 FORCE INDEX (b);
count(*)
0
SELECT count(*) FROM t4 FORCE INDEX (c);
count(*)
0
SET rocksdb_bulk_load=0;
SELECT * FROM t4 FORCE INDEX (PRIMARY);
a b c
-9 11 11
-7 9 9
-5 7 7
-3 5 5
-1 3 3
2 0 0
4 -2 -2
6 -4 -4
8 -6 -6
10 -8 -8
SELECT b FROM t4 FORCE INDEX (b);
b
-8
-6
-4
-2
0
3
5
7
9
11
SELECT c FROM t4 FORCE INDEX (c);
c
-8
-6
-4
-2
0
3
5
7
9
11
Checksums should match
CHECKSUM TABLE t3;
Table Checksum
test.t3 3862424802
CHECKSUM TABLE t4;
Table Checksum
test.t4 3862424802
SET rocksdb_bulk_load_allow_unsorted=1;
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t2 SELECT * FROM t3 WHERE b >= 0 ORDER BY b;
INSERT INTO t2 SELECT * FROM t3 WHERE b < 0 ORDER BY b;
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (b);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (c);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (b);
count(*)
0
SELECT count(*) FROM t2 FORCE INDEX (c);
count(*)
0
SET rocksdb_bulk_load=0;
SELECT * FROM t2 FORCE INDEX (PRIMARY);
a b c
-19 21 21
-17 19 19
-15 17 17
-13 15 15
-11 13 13
-9 11 11
-7 9 9
-5 7 7
-3 5 5
-1 3 3
2 0 0
4 -2 -2
6 -4 -4
8 -6 -6
10 -8 -8
12 -10 -10
14 -12 -12
16 -14 -14
18 -16 -16
20 -18 -18
SELECT b FROM t2 FORCE INDEX (b);
b
-18
-16
-14
-12
-10
-8
-6
-4
-2
0
3
5
7
9
11
13
15
17
19
21
SELECT c FROM t2 FORCE INDEX (c);
c
-18
-16
-14
-12
-10
-8
-6
-4
-2
0
3
5
7
9
11
13
15
17
19
21
Checksums should match
CHECKSUM TABLE t3;
Table Checksum
test.t3 1495594118
CHECKSUM TABLE t2;
Table Checksum
test.t2 1495594118
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
DROP TABLE t4;
......@@ -82,4 +82,19 @@ t1 1 t1_5 2 c1 A 100000 NULL NULL YES LSMTREE
SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
table_name table_rows
t1 100000
drop table t1;
CREATE TABLE t2 (a INT, b INT, c INT, d INT, e INT, f INT, g INT,
PRIMARY KEY (a), KEY (c, b, a, d, e, f, g))
ENGINE=ROCKSDB;
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
test.t2 analyze status OK
cardinality of the columns after 'a' must be equal to the cardinality of column 'a'
SELECT CARDINALITY INTO @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND COLUMN_NAME='a';
SELECT COLUMN_NAME, CARDINALITY = @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND SEQ_IN_INDEX > 3;
COLUMN_NAME CARDINALITY = @c
d 1
e 1
f 1
g 1
drop table t1, t2;
SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
DROP TABLE IF EXISTS t1;
call mtr.add_suppression("Invalid pattern");
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
ALTER TABLE t1 ADD INDEX (value);
ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE t1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
ERROR HY000: Unsupported collation on string indexed column test.t1.value Use binary collation (latin1_bin, binary, utf8_bin).
......@@ -13,6 +14,7 @@ SET GLOBAL rocksdb_strict_collation_check=1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value2)) engine=rocksdb charset utf8;
DROP TABLE t1;
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset utf8 collate utf8_bin;
DROP TABLE t1;
......@@ -127,4 +129,16 @@ CREATE TABLE abcd (id INT PRIMARY KEY, value varchar(50), index(value)) engine=r
ERROR HY000: Unsupported collation on string indexed column test.abcd.value Use binary collation (latin1_bin, binary, utf8_bin).
DROP TABLE abc;
SET GLOBAL rocksdb_strict_collation_exceptions=null;
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
SET GLOBAL rocksdb_strict_collation_check=1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
Warnings:
Warning 1210 Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE t1;
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
ALTER TABLE t1 ADD INDEX (value);
Warnings:
Warning 1210 Unsupported collation on string indexed column test.t1.value Use binary collation (binary, latin1_bin, utf8_bin).
DROP TABLE t1;
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
CREATE DATABASE db_rpc;
USE db_rpc;
CREATE TABLE t1(pk INT PRIMARY KEY) ENGINE=rocksdb;
SET GLOBAL rocksdb_enable_2pc=1;
SET autocommit = 0;
SET autocommit = 0;
BEGIN;
BEGIN;
SELECT * from t1;
pk
SELECT * from t1;
pk
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
COMMIT;
COMMIT;
SELECT * from db_rpc.t1;
pk
1
2
DROP DATABASE db_rpc;
USE mysql;
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
CREATE TABLE test.mysql_table (a INT) ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
USE test;
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE IF NOT EXISTS mysql_table_2 (a INT) ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql_table_no_cols ENGINE=ROCKSDB;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql.mysql_table_2 (a INT) ENGINE=ROCKSDB;
CREATE TABLE mysql_primkey (a INT PRIMARY KEY, b INT, c INT, d INT, INDEX (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey DROP b, DROP a, ADD (f INT PRIMARY KEY);
ALTER TABLE mysql_primkey DROP PRIMARY KEY;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql_primkey2 (a INT PRIMARY KEY, b INT, c INT) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey2 DROP b;
ALTER TABLE mysql_primkey2 ADD (b INT);
ALTER TABLE mysql_primkey2 DROP c, DROP A;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
CREATE TABLE mysql_primkey3 (a INT PRIMARY KEY, b INT, c INT, INDEX indexonb (b), INDEX indexonc (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey3 DROP INDEX indexonb;
ALTER TABLE mysql_primkey3 DROP c;
ALTER TABLE mysql_primkey3 DROP PRIMARY KEY, ADD PRIMARY KEY(b);
CREATE TABLE mysql_primkey4(a INT, b INT, PRIMARY KEY(a), INDEX si (a, b)) ENGINE=ROCKSDB;
DROP INDEX si ON mysql_primkey4;
DROP INDEX `PRIMARY` ON mysql_primkey4;
ERROR HY000: Table without primary key cannot be created outside mysql schema.
ALTER TABLE mysql.mysql_table ADD PRIMARY KEY (a);
ALTER TABLE mysql.mysql_table DROP PRIMARY KEY;
DROP TABLE mysql_primkey;
DROP TABLE mysql_primkey2;
DROP TABLE mysql_primkey3;
DROP TABLE mysql_primkey4;
USE mysql;
DROP TABLE mysql_table;
DROP TABLE mysql_table_2;
......@@ -45,7 +45,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
alter table t1 modify i bigint;;
set high_priority_ddl = 0;
......@@ -98,7 +98,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
alter table t1 rename t1_new;;
set high_priority_ddl = 0;
......@@ -152,7 +152,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop table t1;;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
......@@ -202,7 +202,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop table t1;;
set high_priority_ddl = 0;
......@@ -251,7 +251,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
alter table t1 modify i bigint;;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
......@@ -302,7 +302,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
create index idx1 on t1 (i);;
set high_priority_ddl = 0;
......@@ -342,7 +342,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop index idx1 on t1;;
set high_priority_ddl = 0;
......@@ -390,7 +390,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
truncate t1;;
set high_priority_ddl = 0;
......@@ -438,7 +438,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
create trigger ins_sum before insert on t1 for each row set @sum = @sum + new.i;;
set high_priority_ddl = 0;
......@@ -478,7 +478,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
drop trigger ins_sum;;
set high_priority_ddl = 0;
......@@ -528,7 +528,7 @@ set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
lock tables t1 write;
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
optimize table t1;;
Table Op Msg_type Msg_text
......@@ -538,6 +538,55 @@ connection: default (for show processlist)
show processlist;
Id User Host db Command Time State Info Rows examined Rows sent Tid Srv_Id
<Id> root <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
## Test parameters:
## use_sys_var = 1;
## con_block = con1
## con_kill = default
## cmd = lock tables t1 write;
## high_priority_cmd = optimize high_priority table t1;
## should_kill = 1
## recreate_table = 1
## throw_error = 1
drop table if exists t1;
create table t1 (i int);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) DEFAULT NULL
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
insert into t1 values (1), (2), (3);
connection: con1
lock tables t1 read;;
connection: default
set lock_wait_timeout = 0.02;
set high_priority_lock_wait_timeout = 0.02;
describe t1;
Field Type Null Key Default Extra
i int(11) YES NULL
connection: default (for show processlist)
# both con1 and default exist
show processlist;
Id User Host db Command Time State Info Rows examined Rows sent Tid Srv_Id
<Id> root <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
<Id> test_user1 <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
connection: default
lock tables t1 write;;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
set high_priority_ddl = 1;
select @@high_priority_ddl;
@@high_priority_ddl
1
rename table t1 to t2;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction: Timeout on table metadata: test.t1
lock tables t1 write;;
set high_priority_ddl = 0;
connection: default (for show processlist)
show processlist;
Id User Host db Command Time State Info Rows examined Rows sent Tid Srv_Id
<Id> root <Host> test <Command> <Time> <State> <Info> <RExam> <RSent> <TID> 0
unlock tables;
drop user test_user1@localhost;
drop user test_user2@localhost;
drop table if exists t1;
......
......@@ -53,6 +53,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -60,6 +61,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -102,6 +104,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -109,6 +112,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -120,6 +124,7 @@ TABLE NAME: test.t
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -127,6 +132,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -170,6 +176,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -177,6 +184,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -188,6 +196,7 @@ TABLE NAME: test.t
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -195,6 +204,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -206,6 +216,7 @@ TABLE NAME: test.t
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -213,6 +224,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -240,6 +252,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -247,6 +260,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -352,6 +366,7 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -359,6 +374,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: PRIMARY
TABLE NAME: test.t
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -373,6 +389,25 @@ TABLE NAME: test.t
END OF ROCKSDB TRANSACTION MONITOR OUTPUT
=========================================
Deadlock #6
create table t1 (id int primary key, value int) engine=rocksdb;
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5);
begin;
update t1 set value=value+100 where id=1;
update t1 set value=value+100 where id=2;
begin;
update t1 set value=value+200 where id=3;
update t1 set value=value+100 where id=3;
update t1 set value=value+200 where id=1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
select * from t1;
id value
1 101
2 102
3 103
4 4
5 5
drop table t1;
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
......@@ -390,6 +425,27 @@ LIST OF SNAPSHOTS FOR EACH SESSION:
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
LOCK TYPE: EXCLUSIVE
INDEX NAME: NOT FOUND; IDX_ID
TABLE NAME: NOT FOUND; IDX_ID
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
LOCK TYPE: EXCLUSIVE
INDEX NAME: NOT FOUND; IDX_ID
TABLE NAME: NOT FOUND; IDX_ID
--------TXN_ID GOT DEADLOCK---------
*** DEADLOCK PATH
=========================================
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......@@ -397,6 +453,7 @@ LOCK TYPE: EXCLUSIVE
INDEX NAME: NOT FOUND; IDX_ID
TABLE NAME: NOT FOUND; IDX_ID
---------------WAITING FOR---------------
TSTAMP
TXN_ID
COLUMN FAMILY NAME: default
KEY
......
......@@ -45,6 +45,25 @@ DELETE FROM t4;
drop table t3;
DELETE FROM t1;
DELETE FROM t4;
SET GLOBAL rocksdb_max_manual_compactions = 2;
SET GLOBAL rocksdb_debug_manual_compaction_delay = 3600;
connect con1, localhost, root,,;
connect con2, localhost, root,,;
connect con3, localhost, root,,;
connection con1;
SET GLOBAL rocksdb_compact_cf='cf1';
connection con2;
SET GLOBAL rocksdb_compact_cf='rev:cf2';
connection default;
select * from information_schema.global_status where variable_name='rocksdb_manual_compactions_running';
VARIABLE_NAME VARIABLE_VALUE
ROCKSDB_MANUAL_COMPACTIONS_RUNNING 1
connection con3;
SET GLOBAL rocksdb_compact_cf='cf1';
ERROR HY000: Internal error: Can't schedule more manual compactions. Increase rocksdb_max_manual_compactions or stop issuing more manual compactions.
SET GLOBAL rocksdb_compact_cf='rev:cf2';
ERROR HY000: Internal error: Can't schedule more manual compactions. Increase rocksdb_max_manual_compactions or stop issuing more manual compactions.
connection default;
drop table t4;
CREATE TABLE t5 (
a int not null,
......
DROP TABLE IF EXISTS t1;
CREATE TABLE T1 (a INT PRIMARY KEY AUTO_INCREMENT) ENGINE=ROCKSDB;
INSERT INTO T1 VALUES();
"con1: Creating explict snapshot"
SELECT * FROM T1;
a
1
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
"con2: Attaching snapshot id 1"
ATTACH EXPLICIT ROCKSDB SNAPSHOT 1;
"con2: New row should not be visible"
SELECT * FROM T1;
a
1
"con2: Releasing snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con2: New row should be visible"
SELECT * FROM T1;
a
1
2
"con1: New row should not be visible"
SELECT * FROM T1;
a
1
"con1: Releasing snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: New row should be visible"
SELECT * FROM T1;
a
1
2
"con1: Starting shared snapshot"
SELECT * FROM T1;
a
1
2
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
"con2: Starting existing snapshot"
START TRANSACTION WITH EXISTING ROCKSDB SNAPSHOT 2;
"con2: New row should not be visible"
SELECT * FROM T1;
a
1
2
COMMIT;
"con2: New row should be visible"
SELECT * FROM T1;
a
1
2
3
COMMIT;
"con1: New row should be visible"
SELECT * FROM T1;
a
1
2
3
"con1: Creating explict snapshot"
"con2: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when an explicit snapshot is associated with the connection using CREATE|ATTACH EXPLICIT [ENGINE] SNAPSHOT
"con2: Attaching existing snapshot"
ATTACH EXPLICIT ROCKSDB SNAPSHOT 3;
"con2: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when an explicit snapshot is associated with the connection using CREATE|ATTACH EXPLICIT [ENGINE] SNAPSHOT
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Starting shared snapshot"
"con1: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT|SHARED|EXISTING [ROCKSDB] SNAPSHOT.
"con2: Starting existing snapshot"
START TRANSACTION WITH EXISTING ROCKSDB SNAPSHOT 4;
"con2: Trying to insert row"
INSERT INTO T1 VALUES();
ERROR HY000: Can't execute updates when you started a transaction with START TRANSACTION WITH CONSISTENT|SHARED|EXISTING [ROCKSDB] SNAPSHOT.
COMMIT;
COMMIT;
"con1: Creating explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
"con1: New row should not be seen"
SELECT * FROM T1;
a
1
2
3
"con1: Creating another explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Now the new row should be seen"
SELECT * FROM T1;
a
1
2
3
4
"con1: Starting transaction with consistent snapshot"
START TRANSACTION WITH CONSISTENT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
5
"con1: The new row should not be seen"
SELECT * FROM T1;
a
1
2
3
4
"con1: Creating another explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
"con1: The new row should still not be seen"
SELECT * FROM T1;
a
1
2
3
4
"con1: Committing trx"
COMMIT;
"con1: The new row should now be seen because of the new explicit snapshot created above"
SELECT * FROM T1;
a
1
2
3
4
5
"con1: Releasing explicit snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Starting transaction with shared snapshot"
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
5
6
"con1: The new row should not be seen"
SELECT * FROM T1;
a
1
2
3
4
5
"con1: Starting another transaction with shared snapshot"
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
"con1: The new row should now be seen"
SELECT * FROM T1;
a
1
2
3
4
5
6
COMMIT;
"con1: Creating explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
6
"con1: Releasing explicit snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: Releasing explicit snapshot again"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
ERROR HY000: Cannot process explicit snapshot
"con1: Starting transaction with shared snapshot"
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
6
"con2: Inserting a row"
INSERT INTO T1 VALUES();
SELECT * FROM T1;
a
1
2
3
4
5
6
7
"con1: Creating explicit snapshot"
CREATE EXPLICIT ROCKSDB SNAPSHOT;
SELECT * FROM T1;
a
1
2
3
4
5
6
"con1: Releasing explicit snapshot"
RELEASE EXPLICIT ROCKSDB SNAPSHOT;
"con1: The new row should not be seen"
SELECT* FROM T1;
a
1
2
3
4
5
6
COMMIT;
DROP TABLE T1;
......@@ -14,6 +14,7 @@ show create table information_schema.rocksdb_deadlock;
Table Create Table
ROCKSDB_DEADLOCK CREATE TEMPORARY TABLE `ROCKSDB_DEADLOCK` (
`DEADLOCK_ID` bigint(8) NOT NULL DEFAULT 0,
`TIMESTAMP` bigint(8) NOT NULL DEFAULT 0,
`TRANSACTION_ID` bigint(8) NOT NULL DEFAULT 0,
`CF_NAME` varchar(193) NOT NULL DEFAULT '',
`WAITING_KEY` varchar(513) NOT NULL DEFAULT '',
......@@ -25,7 +26,7 @@ ROCKSDB_DEADLOCK CREATE TEMPORARY TABLE `ROCKSDB_DEADLOCK` (
create table t (i int primary key) engine=rocksdb;
insert into t values (1), (2), (3);
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
Deadlock #1
connection con1;
begin;
......@@ -49,9 +50,9 @@ i
rollback;
connection default;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
Deadlock #2
connection con1;
begin;
......@@ -75,11 +76,11 @@ i
rollback;
connection default;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
set global rocksdb_max_latest_deadlocks = 10;
Deadlock #3
connection con1;
......@@ -104,18 +105,18 @@ i
rollback;
connection default;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
set global rocksdb_max_latest_deadlocks = 1;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 1
connection con3;
set rocksdb_deadlock_detect_depth = 2;
Deadlock #4
......@@ -153,7 +154,7 @@ rollback;
connection default;
set global rocksdb_max_latest_deadlocks = 5;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
Deadlock #5
connection con1;
begin;
......@@ -195,9 +196,9 @@ connection con3;
rollback;
connection default;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED PRIMARY test.t 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE PRIMARY test.t 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY SHARED PRIMARY test.t 1
disconnect con1;
disconnect con2;
disconnect con3;
......@@ -205,11 +206,11 @@ set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY EXCLUSIVE INDEX_NAME TABLE_NAME 0
DEADLOCK_ID TRANSACTION_ID default WAITING_KEY SHARED INDEX_NAME TABLE_NAME 1
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY EXCLUSIVE INDEX_NAME TABLE_NAME 0
DEADLOCK_ID TIMESTAMP TRANSACTION_ID default WAITING_KEY SHARED INDEX_NAME TABLE_NAME 1
set global rocksdb_max_latest_deadlocks = 0;
# Clears the deadlock buffer of any existing deadlocks.
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
select * from information_schema.rocksdb_deadlock;
DEADLOCK_ID TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
DEADLOCK_ID TIMESTAMP TRANSACTION_ID CF_NAME WAITING_KEY LOCK_TYPE INDEX_NAME TABLE_NAME ROLLED_BACK
......@@ -24,5 +24,8 @@ WHERE INDEX_NUMBER =
WHERE TABLE_NAME = 't2' AND INDEX_NAME = "PRIMARY");
COLUMN_FAMILY INDEX_NUMBER SST_NAME NUM_ROWS DATA_SIZE ENTRY_DELETES ENTRY_SINGLEDELETES ENTRY_MERGES ENTRY_OTHERS DISTINCT_KEYS_PREFIX
# # SSTNAME 4 # # # # # 4
select count(*) > 0 from information_schema.rocksdb_sst_props;
count(*) > 0
1
DROP TABLE t1;
DROP TABLE t2;
......@@ -16,6 +16,7 @@ SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK=0;
select VALUE into @keysIn from INFORMATION_SCHEMA.ROCKSDB_COMPACTION_STATS where CF_NAME = 'default' and LEVEL = 'Sum' and TYPE = 'KeyIn';
CREATE TABLE t1 (i1 INT, i2 INT, PRIMARY KEY (i1)) ENGINE = ROCKSDB;
INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3);
set global rocksdb_force_flush_memtable_now = true;
select * from INFORMATION_SCHEMA.ROCKSDB_GLOBAL_INFO;
TYPE NAME VALUE
MAX_INDEX_ID MAX_INDEX_ID max_index_id
......
......@@ -104,3 +104,18 @@ SELECT a,b FROM t1;
a b
UNLOCK TABLES;
DROP TABLE t1, t2;
CREATE TABLE t1 (i INT) ENGINE=MyISAM;
HANDLER t1 OPEN h;
CREATE TABLE t2 (i INT) ENGINE=RocksDB;
LOCK TABLES t2 WRITE;
connect con1,localhost,root,,test;
connection con1;
FLUSH TABLES WITH READ LOCK;
connection default;
INSERT INTO t2 VALUES (1);
UNLOCK TABLES;
HANDLER h CLOSE;
connection con1;
disconnect con1;
connection default;
DROP TABLE t1, t2;
......@@ -78,6 +78,7 @@ ROCKSDB_CF_OPTIONS Stable
ROCKSDB_COMPACTION_STATS Stable
ROCKSDB_GLOBAL_INFO Stable
ROCKSDB_DDL Stable
ROCKSDB_SST_PROPS Stable
ROCKSDB_INDEX_FILE_MAP Stable
ROCKSDB_LOCKS Stable
ROCKSDB_TRX Stable
......
......@@ -34,7 +34,10 @@ update r1 set value1=value1+100 where id1=1 and id2=1 and id3='1';
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
/*!50601 SELECT count(*) INTO @is_rocksdb_supported FROM information_schema.SESSION_VARIABLES WHERE variable_name='rocksdb_bulk_load' */;
/*!50601 SELECT count(*) INTO @is_mysql8 FROM information_schema.TABLES WHERE table_schema='performance_schema' AND table_name='session_variables' */;
/*!50601 SET @check_rocksdb = CONCAT( 'SELECT count(*) INTO @is_rocksdb_supported FROM ', IF (@is_mysql8, 'performance', 'information'), '_schema.session_variables WHERE variable_name=\'rocksdb_bulk_load\'') */;
/*!50601 PREPARE s FROM @check_rocksdb */;
/*!50601 EXECUTE s */;
/*!50601 SET @enable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load=1', 'SET @dummy = 0') */;
/*!50601 PREPARE s FROM @enable_bulk_load */;
/*!50601 EXECUTE s */;
......
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=RocksDB;
INSERT INTO t1 VALUES (1), (2), (3);
SET DEBUG_SYNC="handler_ha_index_next_end SIGNAL idx_scan_in_progress WAIT_FOR finish_scan";
SELECT * FROM t1;
SET DEBUG_SYNC="now WAIT_FOR idx_scan_in_progress";
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
SELECT * FROM t1;
a
1
2
3
SET DEBUG_SYNC="now SIGNAL finish_scan";
a
1
2
3
DROP TABLE t1;
......@@ -24,11 +24,9 @@ CF_NAME OPTION_TYPE VALUE
__system__ PREFIX_EXTRACTOR rocksdb.CappedPrefix.24
cf1 PREFIX_EXTRACTOR rocksdb.CappedPrefix.24
default PREFIX_EXTRACTOR rocksdb.CappedPrefix.24
SET @@global.rocksdb_update_cf_options = 'cf1={prefix_extractor=capped:26};';
Restarting with new Prefix Extractor...
Changed Prefix Extractor (after restart):
Changed Prefix Extractor (after update_cf_options set, without restart):
SELECT * FROM information_schema.rocksdb_cf_options WHERE option_type like '%prefix_extractor%';
CF_NAME OPTION_TYPE VALUE
......@@ -65,6 +63,7 @@ COUNT(*)
select variable_value-@u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
variable_value-@u
2
SET @@global.rocksdb_update_cf_options = '';
set global rocksdb_compact_cf='cf1';
select variable_value into @u from information_schema.global_status where variable_name='rocksdb_bloom_filter_prefix_checked';
SELECT COUNT(*) FROM t1 WHERE id1=1 AND id2=30 AND id3=30;
......
......@@ -880,6 +880,7 @@ rocksdb_block_restart_interval 16
rocksdb_block_size 4096
rocksdb_block_size_deviation 10
rocksdb_bulk_load OFF
rocksdb_bulk_load_allow_sk OFF
rocksdb_bulk_load_allow_unsorted OFF
rocksdb_bulk_load_size 1000
rocksdb_bytes_per_sync 0
......@@ -887,6 +888,7 @@ rocksdb_cache_index_and_filter_blocks ON
rocksdb_checksums_pct 100
rocksdb_collect_sst_properties ON
rocksdb_commit_in_the_middle OFF
rocksdb_commit_time_batch_for_recovery OFF
rocksdb_compact_cf
rocksdb_compaction_readahead_size 0
rocksdb_compaction_sequential_deletes 0
......@@ -900,6 +902,7 @@ rocksdb_datadir ./#rocksdb
rocksdb_db_write_buffer_size 0
rocksdb_deadlock_detect OFF
rocksdb_deadlock_detect_depth 50
rocksdb_debug_manual_compaction_delay 0
rocksdb_debug_optimizer_no_zero_cardinality ON
rocksdb_debug_ttl_ignore_pk OFF
rocksdb_debug_ttl_read_filter_ts 0
......@@ -915,6 +918,7 @@ rocksdb_enable_ttl ON
rocksdb_enable_ttl_read_filtering ON
rocksdb_enable_write_thread_adaptive_yield OFF
rocksdb_error_if_exists OFF
rocksdb_error_on_suboptimal_collation ON
rocksdb_flush_log_at_trx_commit 0
rocksdb_force_compute_memtable_stats ON
rocksdb_force_compute_memtable_stats_cachetime 0
......@@ -934,12 +938,14 @@ rocksdb_lock_scanned_rows OFF
rocksdb_lock_wait_timeout 1
rocksdb_log_file_time_to_roll 0
rocksdb_manifest_preallocation_size 4194304
rocksdb_manual_compaction_threads 0
rocksdb_manual_wal_flush ON
rocksdb_master_skip_tx_api OFF
rocksdb_max_background_jobs 2
rocksdb_max_latest_deadlocks 5
rocksdb_max_log_file_size 0
rocksdb_max_manifest_file_size 18446744073709551615
rocksdb_max_manifest_file_size 1073741824
rocksdb_max_manual_compactions 10
rocksdb_max_row_locks 1048576
rocksdb_max_subcompactions 1
rocksdb_max_total_wal_size 0
......@@ -969,6 +975,7 @@ rocksdb_skip_fill_cache OFF
rocksdb_skip_unique_check_tables .*
rocksdb_sst_mgr_rate_bytes_per_sec 0
rocksdb_stats_dump_period_sec 600
rocksdb_stats_recalc_rate 0
rocksdb_store_row_debug_checksums OFF
rocksdb_strict_collation_check OFF
rocksdb_strict_collation_exceptions
......@@ -995,6 +1002,7 @@ rocksdb_whole_key_filtering ON
rocksdb_write_batch_max_bytes 0
rocksdb_write_disable_wal OFF
rocksdb_write_ignore_missing_column_families OFF
rocksdb_write_policy write_committed
create table t47 (pk int primary key, col1 varchar(12)) engine=rocksdb;
insert into t47 values (1, 'row1');
insert into t47 values (2, 'row2');
......@@ -1351,7 +1359,7 @@ insert into t1 select (@a:=@a+1), 1234 from information_schema.session_variables
set @tmp1= @@rocksdb_max_row_locks;
set rocksdb_max_row_locks= 20;
update t1 set a=a+10;
ERROR HY000: Status error 10 received from RocksDB: Operation aborted: Failed to acquire lock due to max_num_locks limit
ERROR HY000: Got error 10 'Operation aborted: Failed to acquire lock due to max_num_locks limit' from ROCKSDB
DROP TABLE t1;
#
# Test AUTO_INCREMENT behavior problem,
......@@ -1495,6 +1503,8 @@ Rocksdb_block_cache_index_miss #
Rocksdb_block_cache_miss #
Rocksdb_block_cachecompressed_hit #
Rocksdb_block_cachecompressed_miss #
Rocksdb_bloom_filter_full_positive #
Rocksdb_bloom_filter_full_true_positive #
Rocksdb_bloom_filter_prefix_checked #
Rocksdb_bloom_filter_prefix_useful #
Rocksdb_bloom_filter_useful #
......@@ -1511,6 +1521,8 @@ Rocksdb_get_hit_l1 #
Rocksdb_get_hit_l2_and_up #
Rocksdb_getupdatessince_calls #
Rocksdb_iter_bytes_read #
Rocksdb_manual_compactions_processed #
Rocksdb_manual_compactions_running #
Rocksdb_memtable_hit #
Rocksdb_memtable_miss #
Rocksdb_no_file_closes #
......@@ -1602,6 +1614,8 @@ ROCKSDB_BLOCK_CACHE_INDEX_MISS
ROCKSDB_BLOCK_CACHE_MISS
ROCKSDB_BLOCK_CACHECOMPRESSED_HIT
ROCKSDB_BLOCK_CACHECOMPRESSED_MISS
ROCKSDB_BLOOM_FILTER_FULL_POSITIVE
ROCKSDB_BLOOM_FILTER_FULL_TRUE_POSITIVE
ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED
ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL
ROCKSDB_BLOOM_FILTER_USEFUL
......@@ -1618,6 +1632,8 @@ ROCKSDB_GET_HIT_L1
ROCKSDB_GET_HIT_L2_AND_UP
ROCKSDB_GETUPDATESSINCE_CALLS
ROCKSDB_ITER_BYTES_READ
ROCKSDB_MANUAL_COMPACTIONS_PROCESSED
ROCKSDB_MANUAL_COMPACTIONS_RUNNING
ROCKSDB_MEMTABLE_HIT
ROCKSDB_MEMTABLE_MISS
ROCKSDB_NO_FILE_CLOSES
......@@ -1711,6 +1727,8 @@ ROCKSDB_BLOCK_CACHE_INDEX_MISS
ROCKSDB_BLOCK_CACHE_MISS
ROCKSDB_BLOCK_CACHECOMPRESSED_HIT
ROCKSDB_BLOCK_CACHECOMPRESSED_MISS
ROCKSDB_BLOOM_FILTER_FULL_POSITIVE
ROCKSDB_BLOOM_FILTER_FULL_TRUE_POSITIVE
ROCKSDB_BLOOM_FILTER_PREFIX_CHECKED
ROCKSDB_BLOOM_FILTER_PREFIX_USEFUL
ROCKSDB_BLOOM_FILTER_USEFUL
......@@ -1727,6 +1745,8 @@ ROCKSDB_GET_HIT_L1
ROCKSDB_GET_HIT_L2_AND_UP
ROCKSDB_GETUPDATESSINCE_CALLS
ROCKSDB_ITER_BYTES_READ
ROCKSDB_MANUAL_COMPACTIONS_PROCESSED
ROCKSDB_MANUAL_COMPACTIONS_RUNNING
ROCKSDB_MEMTABLE_HIT
ROCKSDB_MEMTABLE_MISS
ROCKSDB_NO_FILE_CLOSES
......
......@@ -11,7 +11,12 @@ insert into t1 values (1,1,1),(2,2,2),(3,3,3);
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
FOUND 1 /0 table records had checksums/ in mysqld.1.err
CHECKTABLE t1: Checking table t1
CHECKTABLE t1: Checking index a
CHECKTABLE t1: ... 3 index entries checked (0 had checksums)
CHECKTABLE t1: Checking index b
CHECKTABLE t1: ... 3 index entries checked (0 had checksums)
CHECKTABLE t1: 0 table records had checksums
drop table t1;
set session rocksdb_store_row_debug_checksums=on;
create table t2 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
......@@ -19,7 +24,12 @@ insert into t2 values (1,1,1),(2,2,2),(3,3,3);
check table t2;
Table Op Msg_type Msg_text
test.t2 check status OK
FOUND 1 /3 table records had checksums/ in mysqld.1.err
CHECKTABLE t2: Checking table t2
CHECKTABLE t2: Checking index a
CHECKTABLE t2: ... 3 index entries checked (3 had checksums)
CHECKTABLE t2: Checking index b
CHECKTABLE t2: ... 3 index entries checked (3 had checksums)
CHECKTABLE t2: 3 table records had checksums
# Now, make a table that has both rows with checksums and without
create table t3 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
insert into t3 values (1,1,1),(2,2,2),(3,3,3);
......@@ -29,16 +39,21 @@ set session rocksdb_store_row_debug_checksums=on;
check table t3;
Table Op Msg_type Msg_text
test.t3 check status OK
FOUND 1 /2 table records had checksums/ in mysqld.1.err
CHECKTABLE t3: Checking table t3
CHECKTABLE t3: Checking index a
CHECKTABLE t3: ... 3 index entries checked (3 had checksums)
CHECKTABLE t3: Checking index b
CHECKTABLE t3: ... 3 index entries checked (2 had checksums)
CHECKTABLE t3: 2 table records had checksums
set session rocksdb_store_row_debug_checksums=on;
set session rocksdb_checksums_pct=5;
create table t4 (pk int primary key, a int, b int, key(a), key(b)) engine=rocksdb;
check table t4;
Table Op Msg_type Msg_text
test.t4 check status OK
10000 index entries had around 500 checksums
10000 index entries had around 500 checksums
Around 500 table records had checksums
4000 index entries had around 200 checksums
4000 index entries had around 200 checksums
Around 200 table records had checksums
set session rocksdb_checksums_pct=100;
#
# Ok, table t2 has all rows with checksums. Simulate a few checksum mismatches.
......
......@@ -50,10 +50,9 @@ i
3
insert into t values (4), (1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
# Statement should be rolled back
# Transaction should be rolled back
select * from t;
i
3
rollback;
connection con2;
i
......@@ -62,6 +61,26 @@ connection con1;
i
rollback;
connection default;
create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
connection con1;
begin;
update t1 force index (value) set value2=value2+1 where value=3;
connection con2;
begin;
update t1 force index (value) set value2=value2+1 where value=2;
update t1 force index (value) set value2=value2+1 where value=4;
connection con1;
update t1 force index (value) set value2=value2+1 where value=4;
connection con2;
update t1 force index (value) set value2=value2+1 where value=3;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection con1;
rollback;
connection con2;
rollback;
drop table t1;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
......
......@@ -50,10 +50,9 @@ i
3
insert into t values (4), (1);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
# Statement should be rolled back
# Transaction should be rolled back
select * from t;
i
3
rollback;
connection con2;
i
......@@ -62,6 +61,26 @@ connection con1;
i
rollback;
connection default;
create table t1 (id int primary key, value int, value2 int, index(value)) engine=rocksdb;
insert into t1 values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10);
connection con1;
begin;
update t1 force index (value) set value2=value2+1 where value=3;
connection con2;
begin;
update t1 force index (value) set value2=value2+1 where value=2;
update t1 force index (value) set value2=value2+1 where value=4;
connection con1;
update t1 force index (value) set value2=value2+1 where value=4;
connection con2;
update t1 force index (value) set value2=value2+1 where value=3;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection con1;
rollback;
connection con2;
rollback;
drop table t1;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
......
connect con, localhost, root,,;
connection default;
create table t1 (a int primary key, b int unique key) engine = rocksdb;
insert into t1 values(1, 1);
connection con;
begin;
update t1 set b = 2 where b = 1;
connection default;
insert into t1 values(2, 1);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection con;
rollback;
select * from t1;
a b
1 1
connection default;
drop table t1;
disconnect con;
......@@ -136,6 +136,7 @@ __system__ TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
__system__ TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
__system__ TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
__system__ TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
__system__ TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
__system__ TABLE_FACTORY::INDEX_TYPE #
__system__ TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
__system__ TABLE_FACTORY::CHECKSUM #
......@@ -162,6 +163,7 @@ __system__ TABLE_FACTORY::VERIFY_COMPRESSION #
__system__ TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
__system__ TABLE_FACTORY::FORMAT_VERSION #
__system__ TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
__system__ TABLE_FACTORY::BLOCK_ALIGN #
cf_t1 COMPARATOR #
cf_t1 MERGE_OPERATOR #
cf_t1 COMPACTION_FILTER #
......@@ -207,6 +209,7 @@ cf_t1 TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
cf_t1 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
cf_t1 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
cf_t1 TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
cf_t1 TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
cf_t1 TABLE_FACTORY::INDEX_TYPE #
cf_t1 TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
cf_t1 TABLE_FACTORY::CHECKSUM #
......@@ -233,6 +236,7 @@ cf_t1 TABLE_FACTORY::VERIFY_COMPRESSION #
cf_t1 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
cf_t1 TABLE_FACTORY::FORMAT_VERSION #
cf_t1 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
cf_t1 TABLE_FACTORY::BLOCK_ALIGN #
default COMPARATOR #
default MERGE_OPERATOR #
default COMPACTION_FILTER #
......@@ -278,6 +282,7 @@ default TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
default TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
default TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
default TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
default TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
default TABLE_FACTORY::INDEX_TYPE #
default TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
default TABLE_FACTORY::CHECKSUM #
......@@ -304,6 +309,7 @@ default TABLE_FACTORY::VERIFY_COMPRESSION #
default TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
default TABLE_FACTORY::FORMAT_VERSION #
default TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
default TABLE_FACTORY::BLOCK_ALIGN #
rev:cf_t2 COMPARATOR #
rev:cf_t2 MERGE_OPERATOR #
rev:cf_t2 COMPACTION_FILTER #
......@@ -349,6 +355,7 @@ rev:cf_t2 TABLE_FACTORY::FLUSH_BLOCK_POLICY_FACTORY #
rev:cf_t2 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS #
rev:cf_t2 TABLE_FACTORY::CACHE_INDEX_AND_FILTER_BLOCKS_WITH_HIGH_PRIORITY #
rev:cf_t2 TABLE_FACTORY::PIN_L0_FILTER_AND_INDEX_BLOCKS_IN_CACHE #
rev:cf_t2 TABLE_FACTORY::PIN_TOP_LEVEL_INDEX_AND_FILTER #
rev:cf_t2 TABLE_FACTORY::INDEX_TYPE #
rev:cf_t2 TABLE_FACTORY::HASH_INDEX_ALLOW_COLLISION #
rev:cf_t2 TABLE_FACTORY::CHECKSUM #
......@@ -375,6 +382,7 @@ rev:cf_t2 TABLE_FACTORY::VERIFY_COMPRESSION #
rev:cf_t2 TABLE_FACTORY::READ_AMP_BYTES_PER_BIT #
rev:cf_t2 TABLE_FACTORY::FORMAT_VERSION #
rev:cf_t2 TABLE_FACTORY::ENABLE_INDEX_COMPRESSION #
rev:cf_t2 TABLE_FACTORY::BLOCK_ALIGN #
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
......@@ -419,4 +427,49 @@ END OF ROCKSDB TRANSACTION MONITOR OUTPUT
=========================================
ROLLBACK;
START TRANSACTION WITH SHARED ROCKSDB SNAPSHOT;
File Position Gtid_executed Snapshot_ID
0 1
SHOW ENGINE rocksdb STATUS;
Type Name Status
STATISTICS # #
DBSTATS # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
MEMORY_STATS # #
BG_THREADS # #
BG_THREADS # #
EXPLICIT_SNAPSHOTS # #
ROLLBACK;
CREATE EXPLICIT rocksdb SNAPSHOT;
File Position Gtid_executed Snapshot_ID
0 2
SHOW ENGINE rocksdb STATUS;
Type Name Status
STATISTICS # #
DBSTATS # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
MEMORY_STATS # #
BG_THREADS # #
BG_THREADS # #
EXPLICIT_SNAPSHOTS # #
RELEASE EXPLICIT rocksdb SNAPSHOT;
File Position Gtid_executed Snapshot_ID
0 2
SHOW ENGINE rocksdb STATUS;
Type Name Status
STATISTICS # #
DBSTATS # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
CF_COMPACTION # #
MEMORY_STATS # #
BG_THREADS # #
BG_THREADS # #
SET GLOBAL rocksdb_max_background_jobs= @save.rocksdb_max_background_jobs;
......@@ -958,3 +958,20 @@ a
rollback;
drop function func;
drop table t1,t2,t3;
#
# MDEV-16710: Slave SQL: Could not execute Update_rows_v1 event with RocksDB and triggers
# Issue#857: MyRocks: Incorrect behavior when multiple statements fail inside a transaction
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=RocksDB;
INSERT INTO t1 VALUES (1);
CREATE TABLE t2 (b INT PRIMARY KEY) ENGINE=RocksDB;
CREATE TRIGGER tr AFTER INSERT ON t2 FOR EACH ROW INSERT INTO non_existing_table VALUES (NULL);
BEGIN;
DELETE FROM t1;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (2);
# Must return empty result:
SELECT * FROM t1;
a
COMMIT;
drop table t1,t2;
......@@ -3,8 +3,8 @@ include/master-slave.inc
DROP TABLE IF EXISTS t1;
connection slave;
include/stop_slave.inc
create table t1 (a int, b int, primary key (a), unique key (b)) engine=rocksdb;
connection master;
create table t1 (a int) engine=rocksdb;
connection slave;
show variables like 'rpl_skip_tx_api';
Variable_name Value
......
......@@ -111,3 +111,11 @@ a b pk
55 NULL 11
10050 NULL 12
DROP TABLE t1;
CREATE TABLE t1 (a INT, b CHAR(8), UNIQUE INDEX(a)) ENGINE=RocksDB;
INSERT INTO t1 (a,b) VALUES (1,'foo'),(2,'bar');
UPDATE t1 SET a=a+100;
SELECT * FROM t1;
a b
101 foo
102 bar
DROP TABLE t1;
Checking direct reads
CREATE TABLE t1 (pk INT PRIMARY KEY DEFAULT '0', a INT(11), b CHAR(8)) ENGINE=rocksdb;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`pk` int(11) NOT NULL DEFAULT 0,
`a` int(11) DEFAULT NULL,
`b` char(8) DEFAULT NULL,
PRIMARY KEY (`pk`)
) ENGINE=ROCKSDB DEFAULT CHARSET=latin1
INSERT INTO t1 VALUES (1, 1,'a');
INSERT INTO t1 (a,b) VALUES (2,'b');
set global rocksdb_force_flush_memtable_now=1;
SELECT a,b FROM t1;
a b
1 a
2 b
DROP TABLE t1;
......@@ -2,7 +2,8 @@ call mtr.add_suppression('RocksDB: Schema mismatch');
CREATE TABLE t1 (pk int primary key) ENGINE=ROCKSDB;
CREATE TABLE t2 (pk int primary key) ENGINE=ROCKSDB PARTITION BY KEY(pk) PARTITIONS 4;
"Expect errors that we are missing two .frm files"
FOUND 2 /Schema mismatch/ in mysqld.1.err
FOUND 1 /RocksDB: Schema mismatch - Table test.t1 is registered in RocksDB but does not have a .frm file/ in validate_datadic.err
FOUND 1 /RocksDB: Schema mismatch - Table test.t2 is registered in RocksDB but does not have a .frm file/ in validate_datadic.err
"Expect an error that we have an extra .frm file"
FOUND 3 /Schema mismatch/ in mysqld.1.err
FOUND 1 /Schema mismatch - A .frm file exists for table test.t1_dummy, but that table is not registered in RocksDB/ in validate_datadic.err
DROP TABLE t1, t2;
......@@ -394,6 +394,24 @@ if ($end_max_index_id <= $start_max_index_id) {
SHOW CREATE TABLE t1;
DROP TABLE t1;
# Cardinality checks for indexes statistics
SET @prior_rocksdb_table_stats_sampling_pct = @@rocksdb_table_stats_sampling_pct;
set global rocksdb_table_stats_sampling_pct = 100;
CREATE TABLE t1 (a INT, b INT, PRIMARY KEY ka(a)) ENGINE=RocksDB;
INSERT INTO t1 (a, b) VALUES (1, 10);
INSERT INTO t1 (a, b) VALUES (2, 10);
INSERT INTO t1 (a, b) VALUES (3, 20);
INSERT INTO t1 (a, b) VALUES (4, 20);
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
SHOW INDEX in t1;
ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
SHOW INDEX in t1;
DROP TABLE t1;
SET global rocksdb_table_stats_sampling_pct = @prior_rocksdb_table_stats_sampling_pct;
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_partition.inc
--source include/not_valgrind.inc
--disable_warnings
drop table if exists t1;
......
......@@ -68,7 +68,7 @@ ALTER TABLE t1 ADD INDEX kb(b), ALGORITHM=INPLACE;
# hitting max row locks (1M)
set @tmp= @@rocksdb_max_row_locks;
set session rocksdb_max_row_locks=1000;
--error ER_RDB_STATUS_GENERAL
--error ER_GET_ERRMSG
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
set session rocksdb_bulk_load=1;
ALTER TABLE t1 ADD INDEX kb_copy(b), ALGORITHM=COPY;
......
--source include/have_rocksdb.inc
--source include/have_debug.inc
--source include/have_log_bin.inc
--source include/not_valgrind.inc
--echo #
--echo # Testing upgrading from server without merges for auto_increment
......@@ -64,8 +65,8 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
--echo # After engine prepare
begin;
......@@ -80,8 +81,8 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
--echo # After binlog
begin;
......@@ -96,8 +97,8 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
--echo # After everything
begin;
......@@ -112,7 +113,7 @@ commit;
--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
--disable_reconnect
select table_schema, table_name, auto_increment from information_schema.tables where table_name = 't';
select max(i) from t;
select max(i) into @row_max from t;
select table_schema, table_name, auto_increment > @row_max from information_schema.tables where table_name = 't';
drop table t;
......@@ -126,3 +126,26 @@ INSERT INTO t1 (a) VALUES (1);
UPDATE t1 SET pk = 3;
ALTER TABLE t1 AUTO_INCREMENT 2;
DROP TABLE t1;
--echo #----------------------------------
--echo # Issue #792 Crash in autoincrement
--echo #----------------------------------
CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY,C2 CHAR) ENGINE=ROCKSDB;
INSERT INTO t1 VALUES(2177,0);
DROP TABLE t1;
CREATE TABLE t0(c0 BLOB) ENGINE=ROCKSDB;
INSERT INTO t0 VALUES(0);
ALTER TABLE t0 AUTO_INCREMENT=0;
DROP TABLE t0;
--echo #----------------------------------
--echo # Issue #869 Crash in autoincrement
--echo #----------------------------------
CREATE TABLE t1 (pk INT AUTO_INCREMENT, a INT, PRIMARY KEY(pk)) ENGINE=RocksDB;
INSERT INTO t1 (a) VALUES (1);
UPDATE t1 SET pk = 3;
ALTER TABLE t1 AUTO_INCREMENT 2;
DROP TABLE t1;
--rocksdb_override_cf_options=rev:bf5_1={prefix_extractor=capped:4;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;}};
--rocksdb_default_cf_options=write_buffer_size=256k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;}
--rocksdb_override_cf_options=rev:bf5_1={prefix_extractor=capped:4};
--rocksdb_default_cf_options=write_buffer_size=16k;block_based_table_factory={filter_policy=bloomfilter:10:false;whole_key_filtering=0;};prefix_extractor=capped:12
--rocksdb_override_cf_options=cf2={optimize_filters_for_hits=true}
--source include/have_rocksdb.inc
create table r1 (id bigint primary key, value bigint) engine=rocksdb;
create table r2 (id bigint, value bigint, primary key (id) comment 'cf2') engine=rocksdb;
set session rocksdb_bulk_load=1;
--disable_query_log
let $t = 1;
let $i = 1;
while ($t <= 2) {
while ($i <= 1000) {
let $insert = INSERT INTO r$t VALUES($i, $i);
# skipping a row
if ($i != 100) {
eval $insert;
}
inc $i;
}
inc $t;
}
--enable_query_log
set session rocksdb_bulk_load=0;
# bloom filter should be useful on insert (calling GetForUpdate)
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r1 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
# cf2 has no bloom filter in the bottommost level
select variable_value into @h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
insert into r2 values (100, 100);
select variable_value-@h from information_schema.global_status where variable_name='rocksdb_block_cache_filter_hit';
DROP TABLE r1, r2;
--source include/have_rocksdb.inc
SET rocksdb_bulk_load_size=15;
CREATE TABLE t4 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t3 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t2 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
CREATE TABLE t1 (a INT, b INT, c INT,
PRIMARY KEY (a),
KEY (b),
KEY (c) COMMENT "rev:cf") ENGINE=ROCKSDB;
### Setup the control table ###
--disable_query_log
let $sign = 1;
let $max = 10;
let $i = 1;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t3 VALUES ($a, $b, $b);
eval $insert;
inc $i;
}
--enable_query_log
### Bulk load PK only ###
SET rocksdb_bulk_load=1;
INSERT INTO t1 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t1 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t1 FORCE INDEX (b);
SELECT count(*) FROM t1 FORCE INDEX (c);
SET rocksdb_bulk_load=0;
SELECT * FROM t1 FORCE INDEX (PRIMARY);
SELECT b FROM t1 FORCE INDEX (b);
SELECT c FROM t1 FORCE INDEX (c);
--echo Checksums should match
CHECKSUM TABLE t3;
CHECKSUM TABLE t1;
### Bulk load PK and SK but require PK order ###
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t4 SELECT * FROM t3 FORCE INDEX (PRIMARY) ORDER BY a;
SELECT count(*) FROM t4 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t4 FORCE INDEX (b);
SELECT count(*) FROM t4 FORCE INDEX (c);
SET rocksdb_bulk_load=0;
SELECT * FROM t4 FORCE INDEX (PRIMARY);
SELECT b FROM t4 FORCE INDEX (b);
SELECT c FROM t4 FORCE INDEX (c);
--echo Checksums should match
CHECKSUM TABLE t3;
CHECKSUM TABLE t4;
### Bulk load both PK and SK in random order for all ###
SET rocksdb_bulk_load_allow_unsorted=1;
SET rocksdb_bulk_load_allow_sk=1;
SET rocksdb_bulk_load=1;
INSERT INTO t2 SELECT * FROM t3 WHERE b >= 0 ORDER BY b;
INSERT INTO t2 SELECT * FROM t3 WHERE b < 0 ORDER BY b;
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t2 FORCE INDEX (b);
SELECT count(*) FROM t2 FORCE INDEX (c);
--disable_query_log
let $sign = 1;
let $max = 20;
let $i = 11;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t2 VALUES ($a, $b, $b);
eval $insert;
inc $i;
}
--enable_query_log
SELECT count(*) FROM t2 FORCE INDEX (PRIMARY);
SELECT count(*) FROM t2 FORCE INDEX (b);
SELECT count(*) FROM t2 FORCE INDEX (c);
SET rocksdb_bulk_load=0;
--disable_query_log
let $sign = 1;
let $max = 20;
let $i = 11;
while ($i <= $max) {
let $a = 1 + $sign * $i;
let $b = 1 - $sign * $i;
let $sign = -$sign;
let $insert = INSERT INTO t3 VALUES ($a, $b, $b);
eval $insert;
inc $i;
}
--enable_query_log
SELECT * FROM t2 FORCE INDEX (PRIMARY);
SELECT b FROM t2 FORCE INDEX (b);
SELECT c FROM t2 FORCE INDEX (c);
--echo Checksums should match
CHECKSUM TABLE t3;
CHECKSUM TABLE t2;
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
DROP TABLE t4;
......@@ -94,5 +94,24 @@ SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema
show index in t1;
SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
drop table t1;
CREATE TABLE t2 (a INT, b INT, c INT, d INT, e INT, f INT, g INT,
PRIMARY KEY (a), KEY (c, b, a, d, e, f, g))
ENGINE=ROCKSDB;
--disable_query_log
let $i=0;
while ($i<100)
{
inc $i;
eval insert t2 values($i, $i div 10, 1, 1, 1, 1, 1);
}
--enable_query_log
# Cardinality of key c should be 1 for c, 10 for b, 100 for a and the other fields.
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
ANALYZE TABLE t2;
--echo cardinality of the columns after 'a' must be equal to the cardinality of column 'a'
SELECT CARDINALITY INTO @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND COLUMN_NAME='a';
SELECT COLUMN_NAME, CARDINALITY = @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND SEQ_IN_INDEX > 3;
drop table t1, t2;
......@@ -8,7 +8,9 @@ let $restart_file= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect;
let $error_log= $MYSQLTEST_VARDIR/log/my_restart.err;
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "echo hello=world>>{}"
--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -t- -k 2 -n | tail -1 | xargs -0 -I {} -t sh -c "sed -i 's/rocksdb_version=.*/rocksdb_version=99.9.9/' {}"
--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -t- -k 2 -n | tail -1 | xargs -0 -I {} -t sh -c "echo hello=world>>{}"
--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--shutdown_server 10
......@@ -19,8 +21,8 @@ select variable_name, variable_value from information_schema.global_variables wh
let SEARCH_FILE= $error_log;
let SEARCH_PATTERN= RocksDB: Compatibility check against existing database options failed;
--source include/search_pattern_in_file.inc
--remove_file $error_log
--enable_reconnect
--exec echo "restart" > $restart_file
--source include/wait_until_connected_again.inc
--exec find $MYSQLD_DATADIR/#rocksdb/OPTIONS* | sort -n | tail -1 | xargs -0 -I {} -t sh -c "sed -i'' -e '/hello=world/d' {}"
select variable_name, variable_value from information_schema.global_variables where variable_name="rocksdb_ignore_unknown_options";
......@@ -3,14 +3,13 @@
# following check is commented out:
# --source include/have_fullregex.inc
SET @start_global_value = @@global.ROCKSDB_STRICT_COLLATION_EXCEPTIONS;
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
call mtr.add_suppression("Invalid pattern");
# ci non-indexed column is allowed
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
# ci indexed column is not allowed
--error ER_UNSUPPORTED_COLLATION
ALTER TABLE t1 ADD INDEX (value);
DROP TABLE t1;
# ci indexed column is not allowed
......@@ -30,6 +29,8 @@ DROP TABLE t1;
# cs latin1_bin is allowed
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
# THIS SHOULD FAIL BUT IT DOES NOT
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
# cs utf8_bin is allowed
......@@ -183,5 +184,28 @@ DROP TABLE abc;
# test bad regex (null caused a crash) - Issue 493
SET GLOBAL rocksdb_strict_collation_exceptions=null;
# test for warnings instead of errors
--let $_mysqld_option=--rocksdb_error_on_suboptimal_collation=0
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
--source include/restart_mysqld_with_option.inc
SET GLOBAL rocksdb_strict_collation_check=1;
# ci indexed column is not optimal, should emit a warning
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text, index(value)) engine=rocksdb charset utf8;
DROP TABLE t1;
# ci non-indexed column is allowed
CREATE TABLE t1 (id INT primary key, value varchar(50), value2 varbinary(50), value3 text) engine=rocksdb charset utf8;
# ci indexed column is not allowed, should emit a warning
ALTER TABLE t1 ADD INDEX (value);
DROP TABLE t1;
# cs latin1_bin is allowed
CREATE TABLE t1 (id varchar(20), value varchar(50), value2 varchar(50), value3 text, primary key (id), index(value, value2)) engine=rocksdb charset latin1 collate latin1_bin;
# THIS SHOULD WARN BUT IT DOES NOT
ALTER TABLE t1 collate=latin1_general_ci;
DROP TABLE t1;
# cleanup
SET GLOBAL rocksdb_strict_collation_exceptions=@start_global_value;
--source include/restart_mysqld.inc
!include suite/rpl/my.cnf
[mysqld.1]
binlog_format=row
--source "include/have_rocksdb.inc"
--source "include/have_log_bin.inc"
#
# This test was created because 2pc transactions were failing in MyRocks
# when using detached sessions. The test generates two separate transactions
# in two detached sessions and then attempts to commit them as simultaneously
# as possible. This consistently reproduced the problem before the fix and
# succeeds now that the fix is in place.
CREATE DATABASE db_rpc;
USE db_rpc;
CREATE TABLE t1(pk INT PRIMARY KEY) ENGINE=rocksdb;
SET GLOBAL rocksdb_enable_2pc=1;
connect(con2,localhost,root,,);
connection default;
query_attrs_add rpc_role root;
query_attrs_add rpc_db db_rpc;
SET autocommit = 0;
let $rpc_id1 = get_rpc_id();
if ($rpc_id1 == "") {
echo "Fail: rpc_id not returned as expected";
}
SET autocommit = 0;
let $rpc_id2 = get_rpc_id();
if ($rpc_id2 == "") {
echo "Fail: rpc_id not returned as expected";
}
query_attrs_delete rpc_role;
query_attrs_delete rpc_db;
query_attrs_add rpc_id $rpc_id1;
BEGIN;
query_attrs_delete rpc_id;
query_attrs_add rpc_id $rpc_id2;
BEGIN;
query_attrs_delete rpc_id;
query_attrs_add rpc_id $rpc_id1;
SELECT * from t1;
query_attrs_delete rpc_id;
query_attrs_add rpc_id $rpc_id2;
SELECT * from t1;
query_attrs_delete rpc_id;
query_attrs_add rpc_id $rpc_id1;
INSERT INTO t1 VALUES(1);
query_attrs_delete rpc_id;
query_attrs_add rpc_id $rpc_id2;
INSERT INTO t1 VALUES(2);
query_attrs_delete rpc_id;
query_attrs_add rpc_id $rpc_id1;
send COMMIT;
connection con2;
query_attrs_add rpc_id $rpc_id2;
send COMMIT;
connection default;
reap;
query_attrs_delete rpc_id;
connection con2;
reap;
query_attrs_delete rpc_id;
connection default;
disconnect con2;
SELECT * from db_rpc.t1;
disable_query_log;
eval KILL $rpc_id1;
eval KILL $rpc_id2;
enable_query_log;
DROP DATABASE db_rpc;
USE mysql;
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE test.mysql_table (a INT) ENGINE=ROCKSDB;
USE test;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE mysql_table (a INT) ENGINE=ROCKSDB;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE IF NOT EXISTS mysql_table_2 (a INT) ENGINE=ROCKSDB;
-- error ER_BLOCK_NO_PRIMARY_KEY
CREATE TABLE mysql_table_no_cols ENGINE=ROCKSDB;
CREATE TABLE mysql.mysql_table_2 (a INT) ENGINE=ROCKSDB;
CREATE TABLE mysql_primkey (a INT PRIMARY KEY, b INT, c INT, d INT, INDEX (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey DROP b, DROP a, ADD (f INT PRIMARY KEY);
-- error ER_BLOCK_NO_PRIMARY_KEY
ALTER TABLE mysql_primkey DROP PRIMARY KEY;
CREATE TABLE mysql_primkey2 (a INT PRIMARY KEY, b INT, c INT) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey2 DROP b;
ALTER TABLE mysql_primkey2 ADD (b INT);
-- error ER_BLOCK_NO_PRIMARY_KEY
ALTER TABLE mysql_primkey2 DROP c, DROP A;
CREATE TABLE mysql_primkey3 (a INT PRIMARY KEY, b INT, c INT, INDEX indexonb (b), INDEX indexonc (c)) ENGINE=ROCKSDB;
ALTER TABLE mysql_primkey3 DROP INDEX indexonb;
ALTER TABLE mysql_primkey3 DROP c;
ALTER TABLE mysql_primkey3 DROP PRIMARY KEY, ADD PRIMARY KEY(b);
CREATE TABLE mysql_primkey4(a INT, b INT, PRIMARY KEY(a), INDEX si (a, b)) ENGINE=ROCKSDB;
DROP INDEX si ON mysql_primkey4;
-- error ER_BLOCK_NO_PRIMARY_KEY
DROP INDEX `PRIMARY` ON mysql_primkey4;
ALTER TABLE mysql.mysql_table ADD PRIMARY KEY (a);
ALTER TABLE mysql.mysql_table DROP PRIMARY KEY;
DROP TABLE mysql_primkey;
DROP TABLE mysql_primkey2;
DROP TABLE mysql_primkey3;
DROP TABLE mysql_primkey4;
USE mysql;
DROP TABLE mysql_table;
DROP TABLE mysql_table_2;
set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_deadlock_detect = @@rocksdb_deadlock_detect;
set @prior_max_latest_deadlocks = @@rocksdb_max_latest_deadlocks;
set global rocksdb_deadlock_detect = on;
set @prior_lock_wait_timeout = @@rocksdb_lock_wait_timeout;
set @prior_deadlock_detect = @@rocksdb_deadlock_detect;
set @prior_max_latest_deadlocks = @@rocksdb_max_latest_deadlocks;
set global rocksdb_deadlock_detect = on;
set global rocksdb_lock_wait_timeout = 10000;
--echo # Clears deadlock buffer of any prior deadlocks.
set global rocksdb_max_latest_deadlocks = 0;
......@@ -21,29 +21,29 @@ let $con3= `SELECT CONNECTION_ID()`;
connection default;
eval create table t (i int primary key) engine=$engine;
insert into t values (1), (2), (3);
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
echo Deadlock #1;
--source include/simple_deadlock.inc
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
echo Deadlock #2;
--source include/simple_deadlock.inc
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
set global rocksdb_max_latest_deadlocks = 10;
echo Deadlock #3;
--source include/simple_deadlock.inc
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
set global rocksdb_max_latest_deadlocks = 1;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
connection con3;
......@@ -51,15 +51,15 @@ set rocksdb_deadlock_detect_depth = 2;
echo Deadlock #4;
connection con1;
begin;
begin;
select * from t where i=1 for update;
connection con2;
begin;
begin;
select * from t where i=2 for update;
connection con3;
begin;
begin;
select * from t where i=3 for update;
connection con1;
......@@ -84,29 +84,29 @@ select case when variable_value-@a = 1 then 'true' else 'false' end as deadlocks
rollback;
connection con2;
reap;
reap;
rollback;
connection con1;
reap;
reap;
rollback;
connection default;
set global rocksdb_max_latest_deadlocks = 5;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
echo Deadlock #5;
connection con1;
begin;
begin;
select * from t where i=1 for update;
connection con2;
begin;
begin;
select * from t where i=2 for update;
connection con3;
begin;
begin;
select * from t where i=3 lock in share mode;
connection con1;
......@@ -128,28 +128,58 @@ select * from t where i=1 lock in share mode;
rollback;
connection con1;
reap;
reap;
rollback;
connection con3;
rollback;
connection default;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
echo Deadlock #6;
connection con1;
create table t1 (id int primary key, value int) engine=rocksdb;
insert into t1 values (1,1),(2,2),(3,3),(4,4),(5,5);
begin;
update t1 set value=value+100 where id=1;
update t1 set value=value+100 where id=2;
connection con2;
begin;
update t1 set value=value+200 where id=3;
connection con1;
send update t1 set value=value+100 where id=3;
connection con2;
let $wait_condition = select count(*) = 1 from information_schema.rocksdb_trx
where thread_id = $con1 and waiting_key != "";
--source include/wait_condition.inc
--error ER_LOCK_DEADLOCK
update t1 set value=value+200 where id=1;
# con2 tx is automatically rolled back
connection con1;
reap;
select * from t1;
drop table t1;
connection default;
disconnect con1;
disconnect con2;
disconnect con3;
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_lock_wait_timeout = @prior_lock_wait_timeout;
set global rocksdb_deadlock_detect = @prior_deadlock_detect;
drop table t;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
set global rocksdb_max_latest_deadlocks = 0;
--echo # Clears deadlock buffer of any existent deadlocks.
set global rocksdb_max_latest_deadlocks = @prior_max_latest_deadlocks;
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/
--replace_regex /[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}/TIMESTAMP/ /WAITING KEY: [0-9a-f]{16}/KEY/ /TRANSACTION ID: [0-9]*/TXN_ID/ /INDEX_ID: [0-9a-f]*/IDX_ID/ /TIMESTAMP: [0-9]*/TSTAMP/
show engine rocksdb transaction status;
--source include/wait_until_count_sessions.inc
......@@ -21,7 +21,9 @@ mysqldump2 : MariaDB's mysqldump doesn't support --print-ordering-key
native_procedure : Not supported in MariaDB
slow_query_log: MDEV-11480
select_for_update_skip_locked_nowait: MDEV-11481
create_no_primary_key_table: MariaDB doesn't have --block_create_no_primary_key
explicit_snapshot: MariaDB doesn't support Shared/Explicit snapshots
percona_nonflushing_analyze_debug : Requires Percona Server's Non-flushing ANALYZE feature
##
## Tests that do not fit MariaDB's test environment. Upstream seems to test
......@@ -34,7 +36,7 @@ rqg_transactions : Test that use RQG are disabled
allow_no_pk_concurrent_insert: stress test
rocksdb_deadlock_stress_rc: stress test
rocksdb_deadlock_stress_rr: stress test
use_direct_reads: Direct IO is not supported on all filesystems
##
## Tests which hit a problem elsewhere (Upstream, SQL layer, etc)
......
--log-bin --binlog_format=row --gtid_mode=ON --enforce_gtid_consistency --log-slave-updates
--rocksdb_strict_collation_check=off --binlog_format=row --log-bin
--rocksdb_strict_collation_check=off --binlog_format=row --log-bin --rocksdb_records_in_range=2