Commit 30d9d4e2 authored Apr 26, 2016 by Sergei Golubchik
5.6.29-76.2
parent 9a957a5b
Showing 25 changed files with 487 additions and 153 deletions (+487 -153)
storage/tokudb/CMakeLists.txt  +1 -1
storage/tokudb/ha_tokudb.cc  +177 -98
storage/tokudb/ha_tokudb.h  +31 -6
storage/tokudb/ha_tokudb_admin.cc  +5 -1
storage/tokudb/ha_tokudb_alter_56.cc  +2 -2
storage/tokudb/ha_tokudb_alter_common.cc  +9 -8
storage/tokudb/ha_tokudb_update.cc  +2 -2
storage/tokudb/hatoku_cmp.cc  +7 -7
storage/tokudb/hatoku_defines.h  +1 -3
storage/tokudb/hatoku_hton.cc  +20 -4
storage/tokudb/hatoku_hton.h  +0 -10
storage/tokudb/mysql-test/tokudb/r/background_job_manager.result  +0 -0
storage/tokudb/mysql-test/tokudb/r/truncate_row_count.result  +1 -1
storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result  +1 -1
storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result  +3 -3
storage/tokudb/mysql-test/tokudb_bugs/r/db917.result  +14 -0
storage/tokudb/mysql-test/tokudb_bugs/r/db938.result  +34 -0
storage/tokudb/mysql-test/tokudb_bugs/t/db917.test  +22 -0
storage/tokudb/mysql-test/tokudb_bugs/t/db938.test  +76 -0
storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test  +6 -0
storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test  +6 -0
storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test  +6 -0
storage/tokudb/tokudb_card.h  +4 -3
storage/tokudb/tokudb_debug.h  +57 -1
storage/tokudb/tokudb_information_schema.cc  +2 -2
storage/tokudb/CMakeLists.txt
-SET(TOKUDB_VERSION 5.6.28-76.1)
+SET(TOKUDB_VERSION 5.6.29-76.2)
 # PerconaFT only supports x86-64 and cmake-2.8.9+
 IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
    NOT CMAKE_VERSION VERSION_LESS "2.8.9")
...
...
storage/tokudb/ha_tokudb.cc
This diff is collapsed.
storage/tokudb/ha_tokudb.h
...
...
@@ -61,9 +61,9 @@ typedef struct loader_context {
 class TOKUDB_SHARE {
 public:
     enum share_state_t {
-        CLOSED,
-        OPENED,
-        ERROR
+        CLOSED = 0,
+        OPENED = 1,
+        ERROR = 2
     };

     // one time, start up init
...
...
@@ -88,6 +88,9 @@ class TOKUDB_SHARE {
     // exactly 0 _use_count
     static void drop_share(TOKUDB_SHARE* share);

+    // returns state string for logging/reporting
+    static const char* get_state_string(share_state_t state);
+
     void* operator new(size_t sz);
     void operator delete(void* p);
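The new get_state_string() helper is only declared here; its definition sits in the collapsed ha_tokudb.cc diff above. A minimal sketch of what such a state-to-string mapping typically looks like (illustrative only, not copied from the commit):

// Sketch only: maps the share_state_t values introduced above to fixed
// strings for the new trace output; the real definition is in ha_tokudb.cc.
enum share_state_t { CLOSED = 0, OPENED = 1, ERROR = 2 };

static const char* get_state_string(share_state_t state) {
    switch (state) {
        case CLOSED: return "CLOSED";
        case OPENED: return "OPENED";
        case ERROR:  return "ERROR";
    }
    return "UNKNOWN";  // defensive default for out-of-range values
}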
...
...
@@ -306,7 +309,6 @@ class TOKUDB_SHARE {
     // cardinality counts
     uint32_t _rec_per_keys;
     uint64_t* _rec_per_key;
-    bool _card_changed;

     void init(const char* table_name);
     void destroy();
...
...
@@ -315,17 +317,34 @@ inline int TOKUDB_SHARE::use_count() const {
     return _use_count;
 }
 inline void TOKUDB_SHARE::lock() const {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count);
     _mutex.lock();
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline void TOKUDB_SHARE::unlock() const {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count);
     _mutex.unlock();
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline TOKUDB_SHARE::share_state_t TOKUDB_SHARE::state() const {
     return _state;
 }
 inline void TOKUDB_SHARE::set_state(TOKUDB_SHARE::share_state_t state) {
+    TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:new_state[%s]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count,
+        get_state_string(state));
     assert_debug(_mutex.is_owned_by_me());
     _state = state;
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline const char* TOKUDB_SHARE::full_table_name() const {
     return _full_table_name.ptr();
...
...
@@ -346,6 +365,13 @@ inline uint TOKUDB_SHARE::table_name_length() const {
     return _table_name.length();
 }
 inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
+    TOKUDB_SHARE_DBUG_ENTER(
+        "file[%s]:state[%s]:use_count[%d]:rows[%" PRIu64 "]:locked[%d]",
+        _full_table_name.ptr(),
+        get_state_string(_state),
+        _use_count,
+        rows,
+        locked);
     if (!locked) {
         lock();
     } else {
...
...
@@ -358,6 +384,7 @@ inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
     if (!locked) {
         unlock();
     }
+    TOKUDB_SHARE_DBUG_VOID_RETURN();
 }
 inline ha_rows TOKUDB_SHARE::row_count() const {
     return _rows;
...
...
@@ -371,7 +398,6 @@ inline void TOKUDB_SHARE::init_cardinality_counts(
     assert_always(_rec_per_key == NULL && _rec_per_keys == 0);
     _rec_per_keys = rec_per_keys;
     _rec_per_key = rec_per_key;
-    _card_changed = true;
 }
 inline void TOKUDB_SHARE::update_cardinality_counts(
     uint32_t rec_per_keys,
...
...
@@ -382,7 +408,6 @@ inline void TOKUDB_SHARE::update_cardinality_counts(
     assert_always(rec_per_keys == _rec_per_keys);
     assert_always(rec_per_key != NULL);
     memcpy(_rec_per_key, rec_per_key, _rec_per_keys * sizeof(uint64_t));
-    _card_changed = true;
 }
 inline void TOKUDB_SHARE::disallow_auto_analysis() {
     assert_debug(_mutex.is_owned_by_me());
...
...
storage/tokudb/ha_tokudb_admin.cc
...
...
@@ -374,6 +374,7 @@ void standard_t::on_run() {
             _local_txn = false;
         }

+        assert_always(_share->key_file[0] != NULL);
         _result = _share->key_file[0]->stat64(_share->key_file[0], _txn, &stat64);
         if (_result != 0) {
             _result = HA_ADMIN_FAILED;
...
...
@@ -575,6 +576,7 @@ int standard_t::analyze_key_progress(void) {
 int standard_t::analyze_key(uint64_t* rec_per_key_part) {
     int error = 0;
     DB* db = _share->key_file[_current_key];
+    assert_always(db != NULL);
     uint64_t num_key_parts = _share->_key_descriptors[_current_key]._parts;
     uint64_t unique_rows[num_key_parts];
     bool is_unique = _share->_key_descriptors[_current_key]._is_unique;
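The assert_always(db != NULL) additions in this file guard the background analyze path against dictionary handles that a concurrent TRUNCATE TABLE has already closed (the DB-938 race exercised by db938.test further down). A simplified, self-contained sketch of the guard, using hypothetical stand-in types rather than the real TokuDB structs:

#include <cassert>

// Hypothetical stand-ins, not the real TokuDB types.
struct DB;                                     // opaque dictionary handle
struct share_sketch_t { DB* key_file[16]; };   // per-key handles, as in TOKUDB_SHARE

// A background analyze job must not dereference a key_file[] slot that
// TRUNCATE TABLE has nulled out; the real code uses assert_always().
static void analyze_key_sketch(share_sketch_t* share, unsigned current_key) {
    DB* db = share->key_file[current_key];
    assert(db != nullptr);
    // ... proceed with stat64()/key scans against db ...
}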
...
...
@@ -897,6 +899,7 @@ int ha_tokudb::do_optimize(THD* thd) {
         }

         DB* db = share->key_file[i];
+        assert_always(db != NULL);
         error = db->optimize(db);
         if (error) {
             goto cleanup;
...
...
@@ -1016,7 +1019,8 @@ int ha_tokudb::check(THD* thd, HA_CHECK_OPT* check_opt) {
                 write_status_msg);
         }
         for (uint i = 0; i < num_DBs; i++) {
-            DB* db = share->key_file[i];
+            DB* db = share->key_file[i];
+            assert_always(db != NULL);
             const char* kname =
                 i == primary_key ? "primary" : table_share->key_info[i].name;
             snprintf(
...
...
storage/tokudb/ha_tokudb_alter_56.cc
...
...
@@ -680,7 +680,7 @@ int ha_tokudb::alter_table_add_index(
         KEY* key = &key_info[i];
         *key = ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
         for (KEY_PART_INFO* key_part = key->key_part;
-             key_part < key->key_part + get_key_parts(key);
+             key_part < key->key_part + key->user_defined_key_parts;
              key_part++) {
             key_part->field = table->field[key_part->fieldnr];
         }
...
...
@@ -1123,7 +1123,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(

 // Return true if a field is part of a key
 static bool field_in_key(KEY* key, Field* field) {
-    for (uint i = 0; i < get_key_parts(key); i++) {
+    for (uint i = 0; i < key->user_defined_key_parts; i++) {
         KEY_PART_INFO* key_part = &key->key_part[i];
         if (strcmp(key_part->field->field_name, field->field_name) == 0)
             return true;
...
...
storage/tokudb/ha_tokudb_alter_common.cc
...
...
@@ -75,8 +75,8 @@ static bool tables_have_same_keys(
             if (print_error) {
                 sql_print_error(
                     "keys disagree on if they are clustering, %d, %d",
-                    get_key_parts(curr_orig_key),
-                    get_key_parts(curr_altered_key));
+                    curr_orig_key->user_defined_key_parts,
+                    curr_altered_key->user_defined_key_parts);
             }
             retval = false;
             goto cleanup;
...
...
@@ -86,18 +86,19 @@ static bool tables_have_same_keys(
             if (print_error) {
                 sql_print_error(
                     "keys disagree on if they are unique, %d, %d",
-                    get_key_parts(curr_orig_key),
-                    get_key_parts(curr_altered_key));
+                    curr_orig_key->user_defined_key_parts,
+                    curr_altered_key->user_defined_key_parts);
             }
             retval = false;
             goto cleanup;
         }
-        if (get_key_parts(curr_orig_key) != get_key_parts(curr_altered_key)) {
+        if (curr_orig_key->user_defined_key_parts !=
+            curr_altered_key->user_defined_key_parts) {
             if (print_error) {
                 sql_print_error(
                     "keys have different number of parts, %d, %d",
-                    get_key_parts(curr_orig_key),
-                    get_key_parts(curr_altered_key));
+                    curr_orig_key->user_defined_key_parts,
+                    curr_altered_key->user_defined_key_parts);
             }
             retval = false;
             goto cleanup;
...
...
@@ -105,7 +106,7 @@ static bool tables_have_same_keys(
         //
         // now verify that each field in the key is the same
         //
-        for (uint32_t j = 0; j < get_key_parts(curr_orig_key); j++) {
+        for (uint32_t j = 0; j < curr_orig_key->user_defined_key_parts; j++) {
             KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j];
             KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j];
             Field* curr_orig_field = curr_orig_part->field;
...
...
storage/tokudb/ha_tokudb_update.cc
...
...
@@ -453,7 +453,7 @@ static bool check_all_update_expressions(
 static bool full_field_in_key(TABLE* table, Field* field) {
     assert_always(table->s->primary_key < table->s->keys);
     KEY* key = &table->s->key_info[table->s->primary_key];
-    for (uint i = 0; i < get_key_parts(key); i++) {
+    for (uint i = 0; i < key->user_defined_key_parts; i++) {
         KEY_PART_INFO* key_part = &key->key_part[i];
         if (strcmp(field->field_name, key_part->field->field_name) == 0) {
             return key_part->length == field->field_length;
...
...
@@ -519,7 +519,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
     if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
         return false;
     KEY* key = &table->s->key_info[table->s->primary_key];
-    for (uint i = 0; i < get_key_parts(key); i++)
+    for (uint i = 0; i < key->user_defined_key_parts; i++)
         bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);

     switch (conds->type()) {
...
...
storage/tokudb/hatoku_cmp.cc
...
...
@@ -1010,7 +1010,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
     uchar* pos = buf;
     uint32_t num_bytes_in_field = 0;
     uint32_t charset_num = 0;
-    for (uint i = 0; i < get_key_parts(key); i++) {
+    for (uint i = 0; i < key->user_defined_key_parts; i++) {
         Field* field = key->key_part[i].field;
         //
         // The first byte states if there is a null byte
...
...
@@ -1881,7 +1881,7 @@ static uint32_t pack_desc_pk_offset_info(
     bool is_constant_offset = true;
     uint32_t offset = 0;
-    for (uint i = 0; i < get_key_parts(prim_key); i++) {
+    for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
         KEY_PART_INFO curr = prim_key->key_part[i];
         uint16 curr_field_index = curr.field->field_index;
...
...
@@ -2503,8 +2503,8 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
         //
         // store number of parts
         //
-        assert_always(get_key_parts(prim_key) < 128);
-        pos[0] = 2 * get_key_parts(prim_key);
+        assert_always(prim_key->user_defined_key_parts < 128);
+        pos[0] = 2 * prim_key->user_defined_key_parts;
         pos++;
         //
         // for each part, store if it is a fixed field or var field
...
...
@@ -2514,7 +2514,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
         //
         pk_info = pos;
         uchar* tmp = pos;
-        for (uint i = 0; i < get_key_parts(prim_key); i++) {
+        for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
             tmp += pack_desc_pk_info(
                 tmp,
                 kc_info,
...
...
@@ -2525,11 +2525,11 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
         //
         // asserting that we moved forward as much as we think we have
         //
-        assert_always(tmp - pos == (2 * get_key_parts(prim_key)));
+        assert_always(tmp - pos == (2 * prim_key->user_defined_key_parts));
         pos = tmp;
     }

-    for (uint i = 0; i < get_key_parts(key_info); i++) {
+    for (uint i = 0; i < key_info->user_defined_key_parts; i++) {
         KEY_PART_INFO curr_kpi = key_info->key_part[i];
         uint16 field_index = curr_kpi.field->field_index;
         Field* field = table_share->field[field_index];
...
...
storage/tokudb/hatoku_defines.h
...
...
@@ -36,10 +36,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
 #include "sql_class.h"
 #include "sql_show.h"
 #include "discover.h"
-#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
-#include <binlog.h>
-#endif
+#include "debug_sync.h"

 #undef PACKAGE
 #undef VERSION
...
...
storage/tokudb/hatoku_hton.cc
...
...
@@ -674,6 +674,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
         // count the total number of prepared txn's that we discard
         long total_prepared = 0;
 #if TOKU_INCLUDE_XA
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "begin XA cleanup");
         while (1) {
             // get xid's
             const long n_xid = 1;
...
...
@@ -698,6 +699,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
             }
             total_prepared += n_prepared;
         }
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "end XA cleanup");
 #endif
         error = db_env->close(
             db_env,
...
...
@@ -922,19 +924,25 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
 #if TOKU_INCLUDE_XA
 static bool tokudb_sync_on_prepare(void) {
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     // skip sync of log if fsync log period > 0
-    if (tokudb::sysvars::fsync_log_period > 0)
+    if (tokudb::sysvars::fsync_log_period > 0) {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
         return false;
-    else
+    } else {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
         return true;
+    }
 }

 static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;

     // if tokudb_support_xa is disable, just return
     if (!tokudb::sysvars::support_xa(thd)) {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
         TOKUDB_DBUG_RETURN(r);
     }
...
...
@@ -944,7 +952,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
     if (txn) {
         uint32_t syncflag = tokudb_sync_on_prepare() ? 0 : DB_TXN_NOSYNC;
-        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "doing txn prepare:%d:%p", all, txn);
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "doing txn prepare:%d:%p", all, txn);
...
...
@@ -957,15 +965,18 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
         // test hook to induce a crash on a debug build
         DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE(););
     } else {
-        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "nothing to prepare %d", all);
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "nothing to prepare %d", all);
     }
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
     TOKUDB_DBUG_RETURN(r);
 }

 static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     if (len == 0 || xid_list == NULL) {
+        TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", 0);
         TOKUDB_DBUG_RETURN(0);
     }
     long num_returned = 0;
...
...
@@ -976,11 +987,13 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
         &num_returned,
         DB_NEXT);
     assert_always(r == 0);
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %ld", num_returned);
     TOKUDB_DBUG_RETURN((int)num_returned);
 }

 static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     DB_TXN* txn = NULL;
     TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
...
...
@@ -993,11 +1006,13 @@ static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
     r = 0;
 cleanup:
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
     TOKUDB_DBUG_RETURN(r);
 }

 static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
     TOKUDB_DBUG_ENTER("");
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
     int r = 0;
     DB_TXN* txn = NULL;
     TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
...
...
@@ -1010,6 +1025,7 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
     r = 0;
 cleanup:
+    TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
     TOKUDB_DBUG_RETURN(r);
 }
...
...
storage/tokudb/hatoku_hton.h
...
...
@@ -199,14 +199,4 @@ void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out);
 void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out);
 const char* tokudb_get_index_name(DB* db);

-inline uint get_key_parts(const KEY* key) {
-#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
-    (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
-    (100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
-    return key->user_defined_key_parts;
-#else
-    return key->key_parts;
-#endif
-}
-
 #endif //#ifdef _HATOKU_HTON
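The removed get_key_parts() shim existed only to bridge servers that still exposed KEY::key_parts; every caller touched in this commit now reads user_defined_key_parts directly. A self-contained sketch of the caller-side pattern, using trimmed stand-in types rather than the real sql-layer structs:

#include <cstring>

// Trimmed, hypothetical stand-ins for the server's KEY structures.
struct field_sketch_t    { const char* field_name; };
struct key_part_sketch_t { field_sketch_t* field; };
struct key_sketch_t {
    unsigned user_defined_key_parts;   // available in MySQL 5.6.9+ / MariaDB 10.0.9+
    key_part_sketch_t* key_part;
};

// Before: for (uint i = 0; i < get_key_parts(key); i++) ...
// After:  read the member directly, as in the hunks above.
static bool field_name_in_key(const key_sketch_t* key, const char* name) {
    for (unsigned i = 0; i < key->user_defined_key_parts; i++) {
        if (std::strcmp(key->key_part[i].field->field_name, name) == 0)
            return true;
    }
    return false;
}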
storage/tokudb/mysql-test/tokudb/r/background_job_manager.result
No preview for this file type
storage/tokudb/mysql-test/tokudb/r/truncate_row_count.result
...
...
@@ -14,5 +14,5 @@ select * from t;
 a b
 select TABLE_ROWS from information_schema.tables where table_schema='test' and table_name='t';
 TABLE_ROWS
-1
+0
 drop table t;
storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
...
...
@@ -17,5 +17,5 @@ test.t analyze status OK
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 7 NULL NULL BTREE
-t 1 x 1 x A 7 NULL NULL YES BTREE
+t 1 x 1 x A 3 NULL NULL YES BTREE
 drop table t;
storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
...
...
@@ -15,7 +15,7 @@ test.t analyze status OK
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 5 NULL NULL BTREE
-t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
 t 1 y 1 y A 5 NULL NULL YES BTREE
 alter table t analyze partition p1;
 Table Op Msg_type Msg_text
...
...
@@ -23,13 +23,13 @@ test.t analyze status OK
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 5 NULL NULL BTREE
-t 1 x 1 x A 5 NULL NULL YES BTREE
+t 1 x 1 x A 2 NULL NULL YES BTREE
 t 1 y 1 y A 5 NULL NULL YES BTREE
 insert into t values (100,1,1),(200,2,1),(300,3,1),(400,4,1),(500,5,1);
 show indexes from t;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
 t 0 PRIMARY 1 id A 9 NULL NULL BTREE
-t 1 x 1 x A 9 NULL NULL YES BTREE
+t 1 x 1 x A 4 NULL NULL YES BTREE
 t 1 y 1 y A 9 NULL NULL YES BTREE
 alter table t analyze partition p0;
 Table Op Msg_type Msg_text
...
...
storage/tokudb/mysql-test/tokudb_bugs/r/db917.result
0 → 100644
drop table if exists t1;
set @orig_table_open_cache = @@global.table_open_cache;
create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
lock tables t1 read;
set @@global.table_open_cache = 1;
begin;
insert into t1 values(1),(1);
select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
ERROR 42S22: Unknown column 'c' in 'where clause'
create table t1(c1 binary (1), c2 varbinary(1));
ERROR 42S01: Table 't1' already exists
unlock tables;
drop table t1;
set @@global.table_open_cache = @orig_table_open_cache;
storage/tokudb/mysql-test/tokudb_bugs/r/db938.result
0 → 100644
set @orig_auto_analyze = @@session.tokudb_auto_analyze;
set @orig_in_background = @@session.tokudb_analyze_in_background;
set @orig_mode = @@session.tokudb_analyze_mode;
set @orig_throttle = @@session.tokudb_analyze_throttle;
set @orig_time = @@session.tokudb_analyze_time;
set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
set @orig_default_storage_engine = @@session.default_storage_engine;
set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
set session default_storage_engine = 'tokudb';
set session tokudb_auto_analyze = 1;
set session tokudb_analyze_in_background = 1;
set session tokudb_analyze_mode = tokudb_analyze_standard;
set session tokudb_analyze_throttle = 0;
set session tokudb_analyze_time = 0;
set global tokudb_cardinality_scale_percent = DEFAULT;
set global tokudb_debug_pause_background_job_manager = TRUE;
create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
database_name table_name job_type job_params scheduler
test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
TRUNCATE TABLE t1;
set global tokudb_debug_pause_background_job_manager = FALSE;
set DEBUG_SYNC = 'now SIGNAL done';
drop table t1;
set session tokudb_auto_analyze = @orig_auto_analyze;
set session tokudb_analyze_in_background = @orig_in_background;
set session tokudb_analyze_mode = @orig_mode;
set session tokudb_analyze_throttle = @orig_throttle;
set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
set session default_storage_engine = @orig_default_storage_engine;
set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
storage/tokudb/mysql-test/tokudb_bugs/t/db917.test
0 → 100644
# test DB-917
# test that table/share open lock timeout does not crash the server on subsequent access
source include/have_tokudb.inc;
disable_warnings;
drop table if exists t1;
enable_warnings;
set @orig_table_open_cache = @@global.table_open_cache;
create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
lock tables t1 read;
set @@global.table_open_cache = 1;
begin;
insert into t1 values(1),(1);
# when the bug is present, this results in a lock wait timeout
--error ER_BAD_FIELD_ERROR
select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
# when the bug exists, this results in the assertion
# kc_info->cp_info[keynr] == NULL in tokudb/ha_tokudb.cc initialize_col_pack_info
--error ER_TABLE_EXISTS_ERROR
create table t1(c1 binary (1), c2 varbinary(1));
unlock tables;
drop table t1;
set @@global.table_open_cache = @orig_table_open_cache;
storage/tokudb/mysql-test/tokudb_bugs/t/db938.test
0 → 100644
# This test for DB-938 tests a race condition where a scheduled background job
# (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that
# were set to NULL during a TRUNCATE TABLE operation.
-- source include/have_tokudb.inc
-- source include/have_debug.inc
-- source include/have_debug_sync.inc

-- enable_query_log

set @orig_auto_analyze = @@session.tokudb_auto_analyze;
set @orig_in_background = @@session.tokudb_analyze_in_background;
set @orig_mode = @@session.tokudb_analyze_mode;
set @orig_throttle = @@session.tokudb_analyze_throttle;
set @orig_time = @@session.tokudb_analyze_time;
set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
set @orig_default_storage_engine = @@session.default_storage_engine;
set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;

# first, lets set up to auto analyze in the background with about any activity
set session default_storage_engine = 'tokudb';
set session tokudb_auto_analyze = 1;
set session tokudb_analyze_in_background = 1;
set session tokudb_analyze_mode = tokudb_analyze_standard;
set session tokudb_analyze_throttle = 0;
set session tokudb_analyze_time = 0;
set global tokudb_cardinality_scale_percent = DEFAULT;

# in debug build, we can prevent the background job manager from running,
# let's do it to hold a job from running until we get the TRUNCATE TABLE
# in action
set global tokudb_debug_pause_background_job_manager = TRUE;

create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));

insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);

# insert above should have triggered an analyze, but since the bjm is paused,
# we will see it sitting in the queue
select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;

# lets flip to another connection
connect(conn1, localhost, root);

# set up the DEBUG_SYNC point
set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';

# send the truncate table
send TRUNCATE TABLE t1;

# back to default connection
connection default;

# release the bjm
set global tokudb_debug_pause_background_job_manager = FALSE;

# if the bug is present, the bjm should crash here within 1/4 of a second
sleep 5;

# lets release and clean up
set DEBUG_SYNC = 'now SIGNAL done';

connection conn1;
reap;

connection default;
disconnect conn1;

drop table t1;

set session tokudb_auto_analyze = @orig_auto_analyze;
set session tokudb_analyze_in_background = @orig_in_background;
set session tokudb_analyze_mode = @orig_mode;
set session tokudb_analyze_throttle = @orig_throttle;
set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
set session default_storage_engine = @orig_default_storage_engine;
set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
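The sync point this test arms has to exist in the TRUNCATE path of ha_tokudb.cc, whose diff is collapsed above. A hedged sketch of how such a point is placed with the server's DEBUG_SYNC macro; the surrounding function name is hypothetical:

// Hypothetical placement (the actual hunk is in the collapsed ha_tokudb.cc
// diff): pause here, right after TRUNCATE has discarded the per-key
// dictionaries, whenever a test has armed the sync point.
// The spelling "dictionarys" is part of the real sync-point name.
#include "debug_sync.h"   // pulled in via hatoku_defines.h in this commit

static void after_truncate_all_dictionaries_sketch(THD* thd) {
    DEBUG_SYNC(thd, "tokudb_after_truncate_all_dictionarys");
}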
storage/tokudb/mysql-test/tokudb_bugs/t/xa-3.test
 --source include/have_innodb.inc
 --source include/have_tokudb.inc
 --source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+--source include/not_valgrind.inc
+# Embedded server does not support crashing
+--source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+--source include/not_crashrep.inc
 --disable_warnings
 drop table if exists t1, t2;
...
...
storage/tokudb/mysql-test/tokudb_bugs/t/xa-4.test
 --source include/have_innodb.inc
 --source include/have_tokudb.inc
 --source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+--source include/not_valgrind.inc
+# Embedded server does not support crashing
+--source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+--source include/not_crashrep.inc
 --disable_warnings
 drop table if exists t1, t2;
...
...
storage/tokudb/mysql-test/tokudb_bugs/t/xa-6.test
 --source include/have_tokudb.inc
 --source include/have_debug.inc
+# Valgrind would report memory leaks on the intentional crashes
+--source include/not_valgrind.inc
+# Embedded server does not support crashing
+--source include/not_embedded.inc
+# Avoid CrashReporter popup on Mac
+--source include/not_crashrep.inc
 --disable_warnings
 drop table if exists t1;
...
...
storage/tokudb/tokudb_card.h
...
...
@@ -27,7 +27,7 @@ namespace tokudb {
    uint compute_total_key_parts(TABLE_SHARE* table_share) {
        uint total_key_parts = 0;
        for (uint i = 0; i < table_share->keys; i++) {
-           total_key_parts += get_key_parts(&table_share->key_info[i]);
+           total_key_parts += table_share->key_info[i].user_defined_key_parts;
        }
        return total_key_parts;
    }
...
...
@@ -156,13 +156,14 @@ namespace tokudb {
        uint orig_key_parts = 0;
        for (uint i = 0; i < table_share->keys; i++) {
            orig_key_offset[i] = orig_key_parts;
-           orig_key_parts += get_key_parts(&table_share->key_info[i]);
+           orig_key_parts += table_share->key_info[i].user_defined_key_parts;
        }
        // if orig card data exists, then use it to compute new card data
        if (error == 0) {
            uint next_key_parts = 0;
            for (uint i = 0; error == 0 && i < altered_table_share->keys; i++) {
-               uint ith_key_parts = get_key_parts(&altered_table_share->key_info[i]);
+               uint ith_key_parts =
+                   altered_table_share->key_info[i].user_defined_key_parts;
                uint orig_key_index;
                if (find_index_of_key(
                        altered_table_share->key_info[i].name,
...
...
storage/tokudb/tokudb_debug.h
...
...
@@ -50,6 +50,8 @@ static void tokudb_backtrace(void);
#define TOKUDB_DEBUG_UPSERT (1<<12)
#define TOKUDB_DEBUG_CHECK (1<<13)
#define TOKUDB_DEBUG_ANALYZE (1<<14)
#define TOKUDB_DEBUG_XA (1<<15)
#define TOKUDB_DEBUG_SHARE (1<<16)
#define TOKUDB_TRACE(_fmt, ...) { \
fprintf(stderr, "%u %s:%u %s " _fmt "\n", tokudb::thread::my_tid(), \
...
...
@@ -124,7 +126,6 @@ static void tokudb_backtrace(void);
DBUG_RETURN(r); \
}
#define TOKUDB_HANDLER_DBUG_VOID_RETURN { \
if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
TOKUDB_HANDLER_TRACE("return"); \
...
...
@@ -132,6 +133,61 @@ static void tokudb_backtrace(void);
DBUG_VOID_RETURN; \
}
#define TOKUDB_SHARE_TRACE(_fmt, ...) \
fprintf(stderr, "%u %p %s:%u TOUDB_SHARE::%s " _fmt "\n", \
tokudb::thread::my_tid(), this, __FILE__, __LINE__, \
__FUNCTION__, ##__VA_ARGS__);
#define TOKUDB_SHARE_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
} \
}
#define TOKUDB_SHARE_DBUG_ENTER(_fmt, ...) { \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
} \
} \
DBUG_ENTER(__FUNCTION__);
#define TOKUDB_SHARE_DBUG_RETURN(r) { \
int rr = (r); \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE) || \
(rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
TOKUDB_SHARE_TRACE("return %d", rr); \
} \
DBUG_RETURN(rr); \
}
#define TOKUDB_SHARE_DBUG_RETURN_DOUBLE(r) { \
double rr = (r); \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE("return %f", rr); \
} \
DBUG_RETURN(rr); \
}
#define TOKUDB_SHARE_DBUG_RETURN_PTR(r) { \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE("return 0x%p", r); \
} \
DBUG_RETURN(r); \
}
#define TOKUDB_SHARE_DBUG_VOID_RETURN() { \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE("return"); \
} \
DBUG_VOID_RETURN; \
}
#define TOKUDB_DBUG_DUMP(s, p, len) \
{ \
TOKUDB_TRACE("%s", s); \
...
...
storage/tokudb/tokudb_information_schema.cc
...
...
@@ -1119,9 +1119,9 @@ void background_job_status_callback(
     table->field[3]->store(type, strlen(type), system_charset_info);
     table->field[4]->store(params, strlen(params), system_charset_info);
     if (user_scheduled)
-        table->field[5]->store("USER", sizeof("USER"), system_charset_info);
+        table->field[5]->store("USER", strlen("USER"), system_charset_info);
     else
-        table->field[5]->store("AUTO", sizeof("AUTO"), system_charset_info);
+        table->field[5]->store("AUTO", strlen("AUTO"), system_charset_info);

     field_store_time_t(table->field[6], scheduled_time);
     field_store_time_t(table->field[7], started_time);
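The sizeof-to-strlen change matters because sizeof on a string literal counts the trailing NUL, so the scheduler column was being stored with a length of 5 instead of 4. A tiny standalone check:

#include <cstdio>
#include <cstring>

int main() {
    // sizeof counts the terminating '\0'; strlen does not.
    std::printf("sizeof=%zu strlen=%zu\n", sizeof("USER"), std::strlen("USER"));
    // prints: sizeof=5 strlen=4
    return 0;
}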
...
...