Commit 30d9d4e2 authored by Sergei Golubchik

5.6.29-76.2

parent 9a957a5b
SET(TOKUDB_VERSION 5.6.28-76.1)
SET(TOKUDB_VERSION 5.6.29-76.2)
# PerconaFT only supports x86-64 and cmake-2.8.9+
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND
NOT CMAKE_VERSION VERSION_LESS "2.8.9")
......
......@@ -161,6 +161,15 @@ void TOKUDB_SHARE::static_init() {
void TOKUDB_SHARE::static_destroy() {
my_hash_free(&_open_tables);
}
const char* TOKUDB_SHARE::get_state_string(share_state_t state) {
static const char* state_string[] = {
"CLOSED",
"OPENED",
"ERROR"
};
assert_always(state == CLOSED || state == OPENED || state == ERROR);
return state_string[state];
}
void* TOKUDB_SHARE::operator new(size_t sz) {
return tokudb::memory::malloc(sz, MYF(MY_WME|MY_ZEROFILL|MY_FAE));
}
......@@ -186,12 +195,24 @@ void TOKUDB_SHARE::init(const char* table_name) {
_database_name,
_table_name,
tmp_dictionary_name);
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count);
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
void TOKUDB_SHARE::destroy() {
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count);
assert_always(_use_count == 0);
assert_always(
_state == TOKUDB_SHARE::CLOSED || _state == TOKUDB_SHARE::ERROR);
thr_lock_delete(&_thr_lock);
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
TOKUDB_SHARE* TOKUDB_SHARE::get_share(
const char* table_name,
......@@ -207,6 +228,14 @@ TOKUDB_SHARE* TOKUDB_SHARE::get_share(
&_open_tables,
(uchar*)table_name,
length);
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_SHARE,
"existing share[%s] %s:share[%p]",
table_name,
share == NULL ? "not found" : "found",
share);
if (!share) {
if (create_new == false)
goto exit;
......@@ -237,25 +266,41 @@ TOKUDB_SHARE* TOKUDB_SHARE::get_share(
return share;
}
void TOKUDB_SHARE::drop_share(TOKUDB_SHARE* share) {
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_SHARE,
"share[%p]:file[%s]:state[%s]:use_count[%d]",
share,
share->_full_table_name.ptr(),
get_state_string(share->_state),
share->_use_count);
_open_tables_mutex.lock();
my_hash_delete(&_open_tables, (uchar*)share);
_open_tables_mutex.unlock();
}
TOKUDB_SHARE::share_state_t TOKUDB_SHARE::addref() {
TOKUDB_SHARE_TRACE_FOR_FLAGS((TOKUDB_DEBUG_ENTER | TOKUDB_DEBUG_SHARE),
"file[%s]:state[%s]:use_count[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count);
lock();
_use_count++;
DBUG_PRINT("info", ("0x%p share->_use_count %u", this, _use_count));
return _state;
}
int TOKUDB_SHARE::release() {
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count);
int error, result = 0;
_mutex.lock();
assert_always(_use_count != 0);
_use_count--;
DBUG_PRINT("info", ("0x%p share->_use_count %u", this, _use_count));
if (_use_count == 0 && _state == TOKUDB_SHARE::OPENED) {
// number of open DB's may not be equal to number of keys we have
// because add_index may have added some. So, we loop through entire
......@@ -299,7 +344,7 @@ int TOKUDB_SHARE::release() {
}
_mutex.unlock();
return result;
TOKUDB_SHARE_DBUG_RETURN(result);
}
void TOKUDB_SHARE::update_row_count(
THD* thd,
......@@ -350,34 +395,32 @@ void TOKUDB_SHARE::update_row_count(
unlock();
}
void TOKUDB_SHARE::set_cardinality_counts_in_table(TABLE* table) {
// if there is nothing new to report, just skip it.
if (_card_changed) {
lock();
uint32_t next_key_part = 0;
for (uint32_t i = 0; i < table->s->keys; i++) {
bool is_unique_key =
(i == table->s->primary_key) ||
(table->key_info[i].flags & HA_NOSAME);
uint32_t num_key_parts = get_key_parts(&table->key_info[i]);
for (uint32_t j = 0; j < num_key_parts; j++) {
assert_always(next_key_part < _rec_per_keys);
ulong val = _rec_per_key[next_key_part++];
if (is_unique_key && j == num_key_parts-1) {
val = 1;
} else {
val =
(val*tokudb::sysvars::cardinality_scale_percent)/100;
if (val == 0)
val = 1;
}
lock();
uint32_t next_key_part = 0;
for (uint32_t i = 0; i < table->s->keys; i++) {
KEY* key = &table->key_info[i];
bool is_unique_key =
(i == table->s->primary_key) || (key->flags & HA_NOSAME);
for (uint32_t j = 0; j < key->actual_key_parts; j++) {
if (j >= key->user_defined_key_parts) {
// MySQL 'hidden' key parts; these really need deeper investigation
// into MySQL hidden keys vs TokuDB hidden keys
key->rec_per_key[j] = 1;
continue;
}
table->key_info[i].rec_per_key[j] = val;
assert_always(next_key_part < _rec_per_keys);
ulong val = _rec_per_key[next_key_part++];
val = (val * tokudb::sysvars::cardinality_scale_percent) / 100;
if (val == 0 || _rows == 0 ||
(is_unique_key && j == key->actual_key_parts - 1)) {
val = 1;
}
key->rec_per_key[j] = val;
}
_card_changed = false;
unlock();
}
unlock();
}
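For reference, the rewritten loop applies a simple rule per key part: scale the stored estimate by tokudb_cardinality_scale_percent, clamp the result to at least 1 (also when the table is empty), and report exactly 1 for the last part of a unique key. A minimal standalone sketch of that computation (hypothetical helper, not part of the commit):

#include <cstdint>

// Hypothetical mirror of the per-key-part computation above; scale_percent
// stands in for tokudb::sysvars::cardinality_scale_percent.
static unsigned long scale_rec_per_key(uint64_t raw, unsigned scale_percent,
                                       uint64_t table_rows,
                                       bool last_part_of_unique_key) {
    unsigned long val = (unsigned long)((raw * scale_percent) / 100);
    if (val == 0 || table_rows == 0 || last_part_of_unique_key)
        val = 1;  // never report 0; a unique key's last part is exact
    return val;
}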
#define HANDLE_INVALID_CURSOR() \
......@@ -771,29 +814,36 @@ static int filter_key_part_compare (const void* left, const void* right) {
// if key, table have proper info set. I had to verify by checking
// in the debugger.
//
void set_key_filter(MY_BITMAP* key_filter, KEY* key, TABLE* table, bool get_offset_from_keypart) {
void set_key_filter(
MY_BITMAP* key_filter,
KEY* key,
TABLE* table,
bool get_offset_from_keypart) {
FILTER_KEY_PART_INFO parts[MAX_REF_PARTS];
uint curr_skip_index = 0;
for (uint i = 0; i < get_key_parts(key); i++) {
for (uint i = 0; i < key->user_defined_key_parts; i++) {
//
// horrendous hack due to bugs in mysql, basically
// we cannot always reliably get the offset from the same source
//
parts[i].offset = get_offset_from_keypart ? key->key_part[i].offset : field_offset(key->key_part[i].field, table);
parts[i].offset =
get_offset_from_keypart ?
key->key_part[i].offset :
field_offset(key->key_part[i].field, table);
parts[i].part_index = i;
}
qsort(
parts, // start of array
get_key_parts(key), //num elements
key->user_defined_key_parts, //num elements
sizeof(*parts), //size of each element
filter_key_part_compare
);
filter_key_part_compare);
for (uint i = 0; i < table->s->fields; i++) {
Field* field = table->field[i];
uint curr_field_offset = field_offset(field, table);
if (curr_skip_index < get_key_parts(key)) {
if (curr_skip_index < key->user_defined_key_parts) {
uint curr_skip_offset = 0;
curr_skip_offset = parts[curr_skip_index].offset;
if (curr_skip_offset == curr_field_offset) {
......@@ -1595,7 +1645,11 @@ static int initialize_key_and_col_info(
return error;
}
bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_INFO* kc_info, uint pk) {
bool ha_tokudb::can_replace_into_be_fast(
TABLE_SHARE* table_share,
KEY_AND_COL_INFO* kc_info,
uint pk) {
uint curr_num_DBs = table_share->keys + tokudb_test(hidden_primary_key);
bool ret_val;
if (curr_num_DBs == 1) {
......@@ -1606,7 +1660,7 @@ bool ha_tokudb::can_replace_into_be_fast(TABLE_SHARE* table_share, KEY_AND_COL_I
for (uint curr_index = 0; curr_index < table_share->keys; curr_index++) {
if (curr_index == pk) continue;
KEY* curr_key_info = &table_share->key_info[curr_index];
for (uint i = 0; i < get_key_parts(curr_key_info); i++) {
for (uint i = 0; i < curr_key_info->user_defined_key_parts; i++) {
uint16 curr_field_index = curr_key_info->key_part[i].field->field_index;
if (!bitmap_is_set(&kc_info->key_filters[curr_index],curr_field_index)) {
ret_val = false;
......@@ -1692,7 +1746,8 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
/* Open other keys; These are part of the share structure */
for (uint i = 0; i < table_share->keys; i++) {
share->_key_descriptors[i]._parts = get_key_parts(&table_share->key_info[i]);
share->_key_descriptors[i]._parts =
table_share->key_info[i].user_defined_key_parts;
if (i == primary_key) {
share->_key_descriptors[i]._is_unique = true;
share->_key_descriptors[i]._name = tokudb::memory::strdup("primary", 0);
......@@ -1732,8 +1787,9 @@ int ha_tokudb::initialize_share(const char* name, int mode) {
// the "infinity byte" in keys, and for placing the DBT size in the first four bytes
//
ref_length = sizeof(uint32_t) + sizeof(uchar);
KEY_PART_INFO *key_part = table->key_info[primary_key].key_part;
KEY_PART_INFO *end = key_part + get_key_parts(&table->key_info[primary_key]);
KEY_PART_INFO* key_part = table->key_info[primary_key].key_part;
KEY_PART_INFO* end =
key_part + table->key_info[primary_key].user_defined_key_parts;
for (; key_part != end; key_part++) {
ref_length += key_part->field->max_packed_col_length(key_part->length);
TOKU_TYPE toku_type = mysql_to_toku_type(key_part->field);
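As context for the loop above: ref_length is the byte budget for a packed primary-key reference, seeded with a 4-byte DBT size prefix plus the 1-byte "infinity byte" and then grown by each PK part's maximum packed length. A hypothetical standalone version of the accumulation, with the packed lengths supplied by the caller instead of Field objects:

#include <cstdint>

// Hypothetical mirror of the ref_length computation above.
static uint32_t compute_ref_length(const uint32_t* max_packed_col_lens,
                                   uint32_t num_pk_parts) {
    uint32_t ref_length = sizeof(uint32_t) + sizeof(unsigned char);
    for (uint32_t i = 0; i < num_pk_parts; i++)
        ref_length += max_packed_col_lens[i];  // max_packed_col_length(length)
    return ref_length;
}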
......@@ -1901,6 +1957,7 @@ int ha_tokudb::open(const char *name, int mode, uint test_if_locked) {
if (ret_val == 0) {
share->set_state(TOKUDB_SHARE::OPENED);
} else {
free_key_and_col_info(&share->kc_info);
share->set_state(TOKUDB_SHARE::ERROR);
}
share->unlock();
......@@ -2616,13 +2673,13 @@ int ha_tokudb::unpack_row(
}
uint32_t ha_tokudb::place_key_into_mysql_buff(
KEY* key_info,
uchar * record,
uchar* data
)
{
KEY_PART_INFO *key_part = key_info->key_part, *end = key_part + get_key_parts(key_info);
uchar *pos = data;
KEY* key_info,
uchar* record,
uchar* data) {
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
uchar* pos = data;
for (; key_part != end; key_part++) {
if (key_part->field->null_bit) {
......@@ -2682,15 +2739,14 @@ void ha_tokudb::unpack_key(uchar * record, DBT const *key, uint index) {
}
uint32_t ha_tokudb::place_key_into_dbt_buff(
KEY* key_info,
uchar * buff,
const uchar * record,
bool* has_null,
int key_length
)
{
KEY_PART_INFO *key_part = key_info->key_part;
KEY_PART_INFO *end = key_part + get_key_parts(key_info);
KEY* key_info,
uchar* buff,
const uchar* record,
bool* has_null,
int key_length) {
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
uchar* curr_buff = buff;
*has_null = false;
for (; key_part != end && key_length > 0; key_part++) {
......@@ -2870,25 +2926,29 @@ DBT* ha_tokudb::create_dbt_key_for_lookup(
// Returns:
// the parameter key
//
DBT *ha_tokudb::pack_key(
DBT * key,
uint keynr,
uchar * buff,
const uchar * key_ptr,
uint key_length,
int8_t inf_byte
)
{
TOKUDB_HANDLER_DBUG_ENTER("key %p %u:%2.2x inf=%d", key_ptr, key_length, key_length > 0 ? key_ptr[0] : 0, inf_byte);
DBT* ha_tokudb::pack_key(
DBT* key,
uint keynr,
uchar* buff,
const uchar* key_ptr,
uint key_length,
int8_t inf_byte) {
TOKUDB_HANDLER_DBUG_ENTER(
"key %p %u:%2.2x inf=%d",
key_ptr,
key_length,
key_length > 0 ? key_ptr[0] : 0,
inf_byte);
#if TOKU_INCLUDE_EXTENDED_KEYS
if (keynr != primary_key && !tokudb_test(hidden_primary_key)) {
DBUG_RETURN(pack_ext_key(key, keynr, buff, key_ptr, key_length, inf_byte));
}
#endif
KEY *key_info = &table->key_info[keynr];
KEY_PART_INFO *key_part = key_info->key_part;
KEY_PART_INFO *end = key_part + get_key_parts(key_info);
my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
......@@ -2930,31 +2990,30 @@ DBT *ha_tokudb::pack_key(
}
#if TOKU_INCLUDE_EXTENDED_KEYS
DBT *ha_tokudb::pack_ext_key(
DBT * key,
uint keynr,
uchar * buff,
const uchar * key_ptr,
uint key_length,
int8_t inf_byte
)
{
DBT* ha_tokudb::pack_ext_key(
DBT* key,
uint keynr,
uchar* buff,
const uchar* key_ptr,
uint key_length,
int8_t inf_byte) {
TOKUDB_HANDLER_DBUG_ENTER("");
// build a list of PK parts that are in the SK. we will use this list to build the
// extended key if necessary.
KEY *pk_key_info = &table->key_info[primary_key];
uint pk_parts = get_key_parts(pk_key_info);
KEY* pk_key_info = &table->key_info[primary_key];
uint pk_parts = pk_key_info->user_defined_key_parts;
uint pk_next = 0;
struct {
const uchar *key_ptr;
KEY_PART_INFO *key_part;
} pk_info[pk_parts];
KEY *key_info = &table->key_info[keynr];
KEY_PART_INFO *key_part = key_info->key_part;
KEY_PART_INFO *end = key_part + get_key_parts(key_info);
my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set);
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set);
memset((void *) key, 0, sizeof(*key));
key->data = buff;
......@@ -4439,11 +4498,16 @@ int ha_tokudb::prepare_index_scan() {
TOKUDB_HANDLER_DBUG_RETURN(error);
}
static bool index_key_is_null(TABLE *table, uint keynr, const uchar *key, uint key_len) {
static bool index_key_is_null(
TABLE* table,
uint keynr,
const uchar* key,
uint key_len) {
bool key_can_be_null = false;
KEY *key_info = &table->key_info[keynr];
KEY_PART_INFO *key_part = key_info->key_part;
KEY_PART_INFO *end = key_part + get_key_parts(key_info);
KEY* key_info = &table->key_info[keynr];
KEY_PART_INFO* key_part = key_info->key_part;
KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts;
for (; key_part != end; key_part++) {
if (key_part->null_bit) {
key_can_be_null = true;
......@@ -5979,11 +6043,7 @@ int ha_tokudb::info(uint flag) {
#endif
DB_TXN* txn = NULL;
if (flag & HA_STATUS_VARIABLE) {
// Just to get optimizations right
stats.records = share->row_count() + share->rows_from_locked_table;
if (stats.records == 0) {
stats.records++;
}
stats.deleted = 0;
if (!(flag & HA_STATUS_NO_LOCK)) {
uint64_t num_rows = 0;
......@@ -6002,9 +6062,6 @@ int ha_tokudb::info(uint flag) {
if (error == 0) {
share->set_row_count(num_rows, false);
stats.records = num_rows;
if (stats.records == 0) {
stats.records++;
}
} else {
goto cleanup;
}
......@@ -6085,6 +6142,22 @@ int ha_tokudb::info(uint flag) {
stats.delete_length += frag_info.unused_bytes;
}
}
/*
The following comment and logic have been taken from InnoDB;
an old hack that forced stats.records to always be > 0 has been
removed.
---
The MySQL optimizer seems to assume in a left join that n_rows
is an accurate estimate if it is zero. Of course, it is not,
since we do not have any locks on the rows yet at this phase.
Since SHOW TABLE STATUS seems to call this function with the
HA_STATUS_TIME flag set, while the left join optimizer does not
set that flag, we add one to a zero value if the flag is not
set. That way SHOW TABLE STATUS will show the best estimate,
while the optimizer never sees the table empty. */
if (stats.records == 0 && !(flag & HA_STATUS_TIME)) {
stats.records++;
}
}
if ((flag & HA_STATUS_CONST)) {
stats.max_data_file_length = 9223372036854775807ULL;
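To restate the estimate rule adopted above: the raw row count is reported as-is when HA_STATUS_TIME is set (the SHOW TABLE STATUS path), but a zero count is bumped to 1 when it is not (the optimizer path). A minimal sketch with HA_STATUS_TIME reduced to a boolean parameter:

#include <cstdint>

// Hypothetical distillation of the stats.records logic above.
static uint64_t reported_records(uint64_t raw_count, bool ha_status_time) {
    // SHOW TABLE STATUS passes HA_STATUS_TIME and gets the true estimate;
    // the left-join optimizer does not, and must never see an exact 0.
    return (raw_count == 0 && !ha_status_time) ? 1 : raw_count;
}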
......@@ -6785,9 +6858,9 @@ void ha_tokudb::trace_create_table_info(const char *name, TABLE * form) {
"key:%d:%s:%d",
i,
key->name,
get_key_parts(key));
key->user_defined_key_parts);
uint p;
for (p = 0; p < get_key_parts(key); p++) {
for (p = 0; p < key->user_defined_key_parts; p++) {
KEY_PART_INFO* key_part = &key->key_part[p];
Field* field = key_part->field;
TOKUDB_HANDLER_TRACE(
......@@ -8565,6 +8638,10 @@ int ha_tokudb::delete_all_rows_internal() {
uint curr_num_DBs = 0;
DB_TXN* txn = NULL;
// this should be enough to handle locking as the higher level MDL
// on this table should prevent any new analyze tasks.
share->cancel_background_jobs();
error = txn_begin(db_env, 0, &txn, 0, ha_thd());
if (error) {
goto cleanup;
......@@ -8592,6 +8669,8 @@ int ha_tokudb::delete_all_rows_internal() {
}
}
DEBUG_SYNC(ha_thd(), "tokudb_after_truncate_all_dictionarys");
// zap the row count
if (error == 0) {
share->set_row_count(0, false);
......
......@@ -61,9 +61,9 @@ typedef struct loader_context {
class TOKUDB_SHARE {
public:
enum share_state_t {
CLOSED,
OPENED,
ERROR
CLOSED = 0,
OPENED = 1,
ERROR = 2
};
// one time, start up init
......@@ -88,6 +88,9 @@ class TOKUDB_SHARE {
// exactly 0 _use_count
static void drop_share(TOKUDB_SHARE* share);
// returns state string for logging/reporting
static const char* get_state_string(share_state_t state);
void* operator new(size_t sz);
void operator delete(void* p);
......@@ -306,7 +309,6 @@ class TOKUDB_SHARE {
// cardinality counts
uint32_t _rec_per_keys;
uint64_t* _rec_per_key;
bool _card_changed;
void init(const char* table_name);
void destroy();
......@@ -315,17 +317,34 @@ inline int TOKUDB_SHARE::use_count() const {
return _use_count;
}
inline void TOKUDB_SHARE::lock() const {
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count);
_mutex.lock();
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline void TOKUDB_SHARE::unlock() const {
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count);
_mutex.unlock();
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline TOKUDB_SHARE::share_state_t TOKUDB_SHARE::state() const {
return _state;
}
inline void TOKUDB_SHARE::set_state(TOKUDB_SHARE::share_state_t state) {
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:new_state[%s]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count,
get_state_string(state));
assert_debug(_mutex.is_owned_by_me());
_state = state;
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline const char* TOKUDB_SHARE::full_table_name() const {
return _full_table_name.ptr();
......@@ -346,6 +365,13 @@ inline uint TOKUDB_SHARE::table_name_length() const {
return _table_name.length();
}
inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
TOKUDB_SHARE_DBUG_ENTER("file[%s]:state[%s]:use_count[%d]:rows[%" PRIu64 "]:locked[%d]",
_full_table_name.ptr(),
get_state_string(_state),
_use_count,
rows,
locked);
if (!locked) {
lock();
} else {
......@@ -358,6 +384,7 @@ inline void TOKUDB_SHARE::set_row_count(uint64_t rows, bool locked) {
if (!locked) {
unlock();
}
TOKUDB_SHARE_DBUG_VOID_RETURN();
}
inline ha_rows TOKUDB_SHARE::row_count() const {
return _rows;
......@@ -371,7 +398,6 @@ inline void TOKUDB_SHARE::init_cardinality_counts(
assert_always(_rec_per_key == NULL && _rec_per_keys == 0);
_rec_per_keys = rec_per_keys;
_rec_per_key = rec_per_key;
_card_changed = true;
}
inline void TOKUDB_SHARE::update_cardinality_counts(
uint32_t rec_per_keys,
......@@ -382,7 +408,6 @@ inline void TOKUDB_SHARE::update_cardinality_counts(
assert_always(rec_per_keys == _rec_per_keys);
assert_always(rec_per_key != NULL);
memcpy(_rec_per_key, rec_per_key, _rec_per_keys * sizeof(uint64_t));
_card_changed = true;
}
inline void TOKUDB_SHARE::disallow_auto_analysis() {
assert_debug(_mutex.is_owned_by_me());
......
......@@ -374,6 +374,7 @@ void standard_t::on_run() {
_local_txn = false;
}
assert_always(_share->key_file[0] != NULL);
_result = _share->key_file[0]->stat64(_share->key_file[0], _txn, &stat64);
if (_result != 0) {
_result = HA_ADMIN_FAILED;
......@@ -575,6 +576,7 @@ int standard_t::analyze_key_progress(void) {
int standard_t::analyze_key(uint64_t* rec_per_key_part) {
int error = 0;
DB* db = _share->key_file[_current_key];
assert_always(db != NULL);
uint64_t num_key_parts = _share->_key_descriptors[_current_key]._parts;
uint64_t unique_rows[num_key_parts];
bool is_unique = _share->_key_descriptors[_current_key]._is_unique;
......@@ -897,6 +899,7 @@ int ha_tokudb::do_optimize(THD* thd) {
}
DB* db = share->key_file[i];
assert_always(db != NULL);
error = db->optimize(db);
if (error) {
goto cleanup;
......@@ -1016,7 +1019,8 @@ int ha_tokudb::check(THD* thd, HA_CHECK_OPT* check_opt) {
write_status_msg);
}
for (uint i = 0; i < num_DBs; i++) {
DB *db = share->key_file[i];
DB* db = share->key_file[i];
assert_always(db != NULL);
const char* kname =
i == primary_key ? "primary" : table_share->key_info[i].name;
snprintf(
......
......@@ -680,7 +680,7 @@ int ha_tokudb::alter_table_add_index(
KEY *key = &key_info[i];
*key = ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
for (KEY_PART_INFO* key_part = key->key_part;
key_part < key->key_part + get_key_parts(key);
key_part < key->key_part + key->user_defined_key_parts;
key_part++) {
key_part->field = table->field[key_part->fieldnr];
}
......@@ -1123,7 +1123,7 @@ int ha_tokudb::alter_table_expand_varchar_offsets(
// Return true if a field is part of a key
static bool field_in_key(KEY *key, Field *field) {
for (uint i = 0; i < get_key_parts(key); i++) {
for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO *key_part = &key->key_part[i];
if (strcmp(key_part->field->field_name, field->field_name) == 0)
return true;
......
......@@ -75,8 +75,8 @@ static bool tables_have_same_keys(
if (print_error) {
sql_print_error(
"keys disagree on if they are clustering, %d, %d",
get_key_parts(curr_orig_key),
get_key_parts(curr_altered_key));
curr_orig_key->user_defined_key_parts,
curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
......@@ -86,18 +86,19 @@ static bool tables_have_same_keys(
if (print_error) {
sql_print_error(
"keys disagree on if they are unique, %d, %d",
get_key_parts(curr_orig_key),
get_key_parts(curr_altered_key));
curr_orig_key->user_defined_key_parts,
curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
}
if (get_key_parts(curr_orig_key) != get_key_parts(curr_altered_key)) {
if (curr_orig_key->user_defined_key_parts !=
curr_altered_key->user_defined_key_parts) {
if (print_error) {
sql_print_error(
"keys have different number of parts, %d, %d",
get_key_parts(curr_orig_key),
get_key_parts(curr_altered_key));
curr_orig_key->user_defined_key_parts,
curr_altered_key->user_defined_key_parts);
}
retval = false;
goto cleanup;
......@@ -105,7 +106,7 @@ static bool tables_have_same_keys(
//
// now verify that each field in the key is the same
//
for (uint32_t j = 0; j < get_key_parts(curr_orig_key); j++) {
for (uint32_t j = 0; j < curr_orig_key->user_defined_key_parts; j++) {
KEY_PART_INFO* curr_orig_part = &curr_orig_key->key_part[j];
KEY_PART_INFO* curr_altered_part = &curr_altered_key->key_part[j];
Field* curr_orig_field = curr_orig_part->field;
......
......@@ -453,7 +453,7 @@ static bool check_all_update_expressions(
static bool full_field_in_key(TABLE* table, Field* field) {
assert_always(table->s->primary_key < table->s->keys);
KEY* key = &table->s->key_info[table->s->primary_key];
for (uint i = 0; i < get_key_parts(key); i++) {
for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO* key_part = &key->key_part[i];
if (strcmp(field->field_name, key_part->field->field_name) == 0) {
return key_part->length == field->field_length;
......@@ -519,7 +519,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
return false;
KEY *key = &table->s->key_info[table->s->primary_key];
for (uint i = 0; i < get_key_parts(key); i++)
for (uint i = 0; i < key->user_defined_key_parts; i++)
bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);
switch (conds->type()) {
......
......@@ -1010,7 +1010,7 @@ static int create_toku_key_descriptor_for_key(KEY* key, uchar* buf) {
uchar* pos = buf;
uint32_t num_bytes_in_field = 0;
uint32_t charset_num = 0;
for (uint i = 0; i < get_key_parts(key); i++){
for (uint i = 0; i < key->user_defined_key_parts; i++) {
Field* field = key->key_part[i].field;
//
// The first byte states if there is a null byte
......@@ -1881,7 +1881,7 @@ static uint32_t pack_desc_pk_offset_info(
bool is_constant_offset = true;
uint32_t offset = 0;
for (uint i = 0; i < get_key_parts(prim_key); i++) {
for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
KEY_PART_INFO curr = prim_key->key_part[i];
uint16 curr_field_index = curr.field->field_index;
......@@ -2503,8 +2503,8 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
// store number of parts
//
assert_always(get_key_parts(prim_key) < 128);
pos[0] = 2 * get_key_parts(prim_key);
assert_always(prim_key->user_defined_key_parts < 128);
pos[0] = 2 * prim_key->user_defined_key_parts;
pos++;
//
// for each part, store if it is a fixed field or var field
......@@ -2514,7 +2514,7 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
pk_info = pos;
uchar* tmp = pos;
for (uint i = 0; i < get_key_parts(prim_key); i++) {
for (uint i = 0; i < prim_key->user_defined_key_parts; i++) {
tmp += pack_desc_pk_info(
tmp,
kc_info,
......@@ -2525,11 +2525,11 @@ static uint32_t create_toku_secondary_key_pack_descriptor (
//
// asserting that we moved forward as much as we think we have
//
assert_always(tmp - pos == (2 * get_key_parts(prim_key)));
assert_always(tmp - pos == (2 * prim_key->user_defined_key_parts));
pos = tmp;
}
for (uint i = 0; i < get_key_parts(key_info); i++) {
for (uint i = 0; i < key_info->user_defined_key_parts; i++) {
KEY_PART_INFO curr_kpi = key_info->key_part[i];
uint16 field_index = curr_kpi.field->field_index;
Field* field = table_share->field[field_index];
......
......@@ -36,10 +36,8 @@ Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
#include "sql_class.h"
#include "sql_show.h"
#include "discover.h"
#if (50600 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || (50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799)
#include <binlog.h>
#endif
#include "debug_sync.h"
#undef PACKAGE
#undef VERSION
......
......@@ -674,6 +674,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
// count the total number of prepared txns that we discard
long total_prepared = 0;
#if TOKU_INCLUDE_XA
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "begin XA cleanup");
while (1) {
// get xid's
const long n_xid = 1;
......@@ -698,6 +699,7 @@ int tokudb_end(handlerton* hton, ha_panic_function type) {
}
total_prepared += n_prepared;
}
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "end XA cleanup");
#endif
error = db_env->close(
db_env,
......@@ -922,19 +924,25 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
#if TOKU_INCLUDE_XA
static bool tokudb_sync_on_prepare(void) {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
// skip sync of log if fsync log period > 0
if (tokudb::sysvars::fsync_log_period > 0)
if (tokudb::sysvars::fsync_log_period > 0) {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
return false;
else
} else {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit");
return true;
}
}
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
// if tokudb_support_xa is disabled, just return
if (!tokudb::sysvars::support_xa(thd)) {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
......@@ -944,7 +952,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
if (txn) {
uint32_t syncflag = tokudb_sync_on_prepare() ? 0 : DB_TXN_NOSYNC;
TOKUDB_TRACE_FOR_FLAGS(
TOKUDB_DEBUG_TXN,
TOKUDB_DEBUG_XA,
"doing txn prepare:%d:%p",
all,
txn);
......@@ -957,15 +965,18 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE(););
} else {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_TXN, "nothing to prepare %d", all);
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "nothing to prepare %d", all);
}
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
if (len == 0 || xid_list == NULL) {
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", 0);
TOKUDB_DBUG_RETURN(0);
}
long num_returned = 0;
......@@ -976,11 +987,13 @@ static int tokudb_xa_recover(handlerton* hton, XID* xid_list, uint len) {
&num_returned,
DB_NEXT);
assert_always(r == 0);
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %ld", num_returned);
TOKUDB_DBUG_RETURN((int)num_returned);
}
static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
DB_TXN* txn = NULL;
TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
......@@ -993,11 +1006,13 @@ static int tokudb_commit_by_xid(handlerton* hton, XID* xid) {
r = 0;
cleanup:
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
TOKUDB_DBUG_ENTER("");
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "enter");
int r = 0;
DB_TXN* txn = NULL;
TOKU_XA_XID* toku_xid = (TOKU_XA_XID*)xid;
......@@ -1010,6 +1025,7 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
r = 0;
cleanup:
TOKUDB_TRACE_FOR_FLAGS(TOKUDB_DEBUG_XA, "exit %d", r);
TOKUDB_DBUG_RETURN(r);
}
......
......@@ -199,14 +199,4 @@ void tokudb_pretty_left_key(const DB* db, const DBT* key, String* out);
void tokudb_pretty_right_key(const DB* db, const DBT* key, String* out);
const char *tokudb_get_index_name(DB* db);
inline uint get_key_parts(const KEY *key) {
#if (50609 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50699) || \
(50700 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50799) || \
(100009 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 100099)
return key->user_defined_key_parts;
#else
return key->key_parts;
#endif
}
#endif //#ifdef _HATOKU_HTON
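For context on the removal above: on every server version this commit targets, the version-gated helper reduced to a single member access, which is why each call site in this commit can be rewritten to use key->user_defined_key_parts directly. Spelled out (illustrative only, not part of the commit):

// What the removed helper collapsed to on supported servers:
// the user-declared key parts, excluding any hidden PK parts the server
// appends to secondary keys (those are covered by actual_key_parts).
inline uint get_key_parts(const KEY* key) {
    return key->user_defined_key_parts;
}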
......@@ -14,5 +14,5 @@ select * from t;
a b
select TABLE_ROWS from information_schema.tables where table_schema='test' and table_name='t';
TABLE_ROWS
1
0
drop table t;
......@@ -17,5 +17,5 @@ test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
t 1 x 1 x A 7 NULL NULL YES BTREE
t 1 x 1 x A 3 NULL NULL YES BTREE
drop table t;
......@@ -15,7 +15,7 @@ test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 5 NULL NULL BTREE
t 1 x 1 x A 5 NULL NULL YES BTREE
t 1 x 1 x A 2 NULL NULL YES BTREE
t 1 y 1 y A 5 NULL NULL YES BTREE
alter table t analyze partition p1;
Table Op Msg_type Msg_text
......@@ -23,13 +23,13 @@ test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 5 NULL NULL BTREE
t 1 x 1 x A 5 NULL NULL YES BTREE
t 1 x 1 x A 2 NULL NULL YES BTREE
t 1 y 1 y A 5 NULL NULL YES BTREE
insert into t values (100,1,1),(200,2,1),(300,3,1),(400,4,1),(500,5,1);
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 9 NULL NULL BTREE
t 1 x 1 x A 9 NULL NULL YES BTREE
t 1 x 1 x A 4 NULL NULL YES BTREE
t 1 y 1 y A 9 NULL NULL YES BTREE
alter table t analyze partition p0;
Table Op Msg_type Msg_text
......
drop table if exists t1;
set @orig_table_open_cache = @@global.table_open_cache;
create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
lock tables t1 read;
set @@global.table_open_cache = 1;
begin;
insert into t1 values(1),(1);
select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
ERROR 42S22: Unknown column 'c' in 'where clause'
create table t1(c1 binary (1), c2 varbinary(1));
ERROR 42S01: Table 't1' already exists
unlock tables;
drop table t1;
set @@global.table_open_cache = @orig_table_open_cache;
set @orig_auto_analyze = @@session.tokudb_auto_analyze;
set @orig_in_background = @@session.tokudb_analyze_in_background;
set @orig_mode = @@session.tokudb_analyze_mode;
set @orig_throttle = @@session.tokudb_analyze_throttle;
set @orig_time = @@session.tokudb_analyze_time;
set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
set @orig_default_storage_engine = @@session.default_storage_engine;
set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
set session default_storage_engine = 'tokudb';
set session tokudb_auto_analyze = 1;
set session tokudb_analyze_in_background = 1;
set session tokudb_analyze_mode = tokudb_analyze_standard;
set session tokudb_analyze_throttle = 0;
set session tokudb_analyze_time = 0;
set global tokudb_cardinality_scale_percent = DEFAULT;
set global tokudb_debug_pause_background_job_manager = TRUE;
create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
database_name table_name job_type job_params scheduler
test t1 TOKUDB_ANALYZE_MODE_STANDARD TOKUDB_ANALYZE_DELETE_FRACTION=1.000000; TOKUDB_ANALYZE_TIME=0; TOKUDB_ANALYZE_THROTTLE=0; AUTO
set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
TRUNCATE TABLE t1;
set global tokudb_debug_pause_background_job_manager = FALSE;
set DEBUG_SYNC = 'now SIGNAL done';
drop table t1;
set session tokudb_auto_analyze = @orig_auto_analyze;
set session tokudb_analyze_in_background = @orig_in_background;
set session tokudb_analyze_mode = @orig_mode;
set session tokudb_analyze_throttle = @orig_throttle;
set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
set session default_storage_engine = @orig_default_storage_engine;
set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
# test DB-917
# test that table/share open lock timeout does not crash the server on subsequent access
source include/have_tokudb.inc;
disable_warnings;
drop table if exists t1;
enable_warnings;
set @orig_table_open_cache = @@global.table_open_cache;
create table t1(a int) engine = tokudb partition by key(a) partitions 2 (partition p0 engine = tokudb, partition p1 engine = tokudb);
lock tables t1 read;
set @@global.table_open_cache = 1;
begin;
insert into t1 values(1),(1);
# when the bug is present, this results in a lock wait timeout
--error ER_BAD_FIELD_ERROR
select * from t1 where c like _ucs2 0x039C0025 collate ucs2_unicode_ci;
# when the bug exists, this results in the assertion
# kc_info->cp_info[keynr] == NULL in tokudb/ha_tokudb.cc initialize_col_pack_info
--error ER_TABLE_EXISTS_ERROR
create table t1(c1 binary (1), c2 varbinary(1));
unlock tables;
drop table t1;
set @@global.table_open_cache = @orig_table_open_cache;
# This test for DB-938 tests a race condition where a scheduled background job
# (analyze) ends up operating on a set of DB* key_file[] in TOKUDB_SHARE that
# were set to NULL during a TRUNCATE TABLE operation.
-- source include/have_tokudb.inc
-- source include/have_debug.inc
-- source include/have_debug_sync.inc
-- enable_query_log
set @orig_auto_analyze = @@session.tokudb_auto_analyze;
set @orig_in_background = @@session.tokudb_analyze_in_background;
set @orig_mode = @@session.tokudb_analyze_mode;
set @orig_throttle = @@session.tokudb_analyze_throttle;
set @orig_time = @@session.tokudb_analyze_time;
set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
set @orig_default_storage_engine = @@session.default_storage_engine;
set @orig_pause_background_job_manager = @@global.tokudb_debug_pause_background_job_manager;
# first, let's set up auto analyze to run in the background on just
# about any activity
set session default_storage_engine = 'tokudb';
set session tokudb_auto_analyze = 1;
set session tokudb_analyze_in_background = 1;
set session tokudb_analyze_mode = tokudb_analyze_standard;
set session tokudb_analyze_throttle = 0;
set session tokudb_analyze_time = 0;
set global tokudb_cardinality_scale_percent = DEFAULT;
# in debug build, we can prevent the background job manager from running,
# let's do it to hold a job from running until we get the TRUNCATE TABLE
# in action
set global tokudb_debug_pause_background_job_manager = TRUE;
create table t1 (a int not null auto_increment, b int, c int, primary key(a), key kb(b), key kc(c), key kabc(a,b,c), key kab(a,b), key kbc(b,c));
insert into t1(b,c) values(0,0), (1,1), (2,2), (3,3);
# insert above should have triggered an analyze, but since the bjm is paused,
# we will see it sitting in the queue
select database_name, table_name, job_type, job_params, scheduler from information_schema.tokudb_background_job_status;
# let's flip to another connection
connect(conn1, localhost, root);
# set up the DEBUG_SYNC point
set DEBUG_SYNC = 'tokudb_after_truncate_all_dictionarys SIGNAL closed WAIT_FOR done';
# send the TRUNCATE TABLE
send TRUNCATE TABLE t1;
# back to default connection
connection default;
# release the bjm
set global tokudb_debug_pause_background_job_manager = FALSE;
# if the bug is present, the bjm should crash here within 1/4 of a second
sleep 5;
# let's release and clean up
set DEBUG_SYNC = 'now SIGNAL done';
connection conn1;
reap;
connection default;
disconnect conn1;
drop table t1;
set session tokudb_auto_analyze = @orig_auto_analyze;
set session tokudb_analyze_in_background = @orig_in_background;
set session tokudb_analyze_mode = @orig_mode;
set session tokudb_analyze_throttle = @orig_throttle;
set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
set session default_storage_engine = @orig_default_storage_engine;
set global tokudb_debug_pause_background_job_manager = @orig_pause_background_job_manager;
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- source include/have_debug.inc
# Valgrind would report memory leaks on the intentional crashes
-- source include/not_valgrind.inc
# Embedded server does not support crashing
-- source include/not_embedded.inc
# Avoid CrashReporter popup on Mac
-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1, t2;
......
-- source include/have_innodb.inc
-- source include/have_tokudb.inc
-- source include/have_debug.inc
# Valgrind would report memory leaks on the intentional crashes
-- source include/not_valgrind.inc
# Embedded server does not support crashing
-- source include/not_embedded.inc
# Avoid CrashReporter popup on Mac
-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1, t2;
......
--source include/have_tokudb.inc
--source include/have_debug.inc
# Valgrind would report memory leaks on the intentional crashes
-- source include/not_valgrind.inc
# Embedded server does not support crashing
-- source include/not_embedded.inc
# Avoid CrashReporter popup on Mac
-- source include/not_crashrep.inc
--disable_warnings
drop table if exists t1;
......
......@@ -27,7 +27,7 @@ namespace tokudb {
uint compute_total_key_parts(TABLE_SHARE *table_share) {
uint total_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
total_key_parts += get_key_parts(&table_share->key_info[i]);
total_key_parts += table_share->key_info[i].user_defined_key_parts;
}
return total_key_parts;
}
......@@ -156,13 +156,14 @@ namespace tokudb {
uint orig_key_parts = 0;
for (uint i = 0; i < table_share->keys; i++) {
orig_key_offset[i] = orig_key_parts;
orig_key_parts += get_key_parts(&table_share->key_info[i]);
orig_key_parts += table_share->key_info[i].user_defined_key_parts;
}
// if orig card data exists, then use it to compute new card data
if (error == 0) {
uint next_key_parts = 0;
for (uint i = 0; error == 0 && i < altered_table_share->keys; i++) {
uint ith_key_parts = get_key_parts(&altered_table_share->key_info[i]);
uint ith_key_parts =
altered_table_share->key_info[i].user_defined_key_parts;
uint orig_key_index;
if (find_index_of_key(
altered_table_share->key_info[i].name,
......
......@@ -50,6 +50,8 @@ static void tokudb_backtrace(void);
#define TOKUDB_DEBUG_UPSERT (1<<12)
#define TOKUDB_DEBUG_CHECK (1<<13)
#define TOKUDB_DEBUG_ANALYZE (1<<14)
#define TOKUDB_DEBUG_XA (1<<15)
#define TOKUDB_DEBUG_SHARE (1<<16)
#define TOKUDB_TRACE(_fmt, ...) { \
fprintf(stderr, "%u %s:%u %s " _fmt "\n", tokudb::thread::my_tid(), \
......@@ -124,7 +126,6 @@ static void tokudb_backtrace(void);
DBUG_RETURN(r); \
}
#define TOKUDB_HANDLER_DBUG_VOID_RETURN { \
if (TOKUDB_UNLIKELY(tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN)) { \
TOKUDB_HANDLER_TRACE("return"); \
......@@ -132,6 +133,61 @@ static void tokudb_backtrace(void);
DBUG_VOID_RETURN; \
}
#define TOKUDB_SHARE_TRACE(_fmt, ...) \
fprintf(stderr, "%u %p %s:%u TOKUDB_SHARE::%s " _fmt "\n", \
tokudb::thread::my_tid(), this, __FILE__, __LINE__, \
__FUNCTION__, ##__VA_ARGS__);
#define TOKUDB_SHARE_TRACE_FOR_FLAGS(_flags, _fmt, ...) { \
if (TOKUDB_UNLIKELY(TOKUDB_DEBUG_FLAGS(_flags))) { \
TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
} \
}
#define TOKUDB_SHARE_DBUG_ENTER(_fmt, ...) { \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_ENTER) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE(_fmt, ##__VA_ARGS__); \
} \
} \
DBUG_ENTER(__FUNCTION__);
#define TOKUDB_SHARE_DBUG_RETURN(r) { \
int rr = (r); \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE) || \
(rr != 0 && (tokudb::sysvars::debug & TOKUDB_DEBUG_ERROR)))) { \
TOKUDB_SHARE_TRACE("return %d", rr); \
} \
DBUG_RETURN(rr); \
}
#define TOKUDB_SHARE_DBUG_RETURN_DOUBLE(r) { \
double rr = (r); \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE("return %f", rr); \
} \
DBUG_RETURN(rr); \
}
#define TOKUDB_SHARE_DBUG_RETURN_PTR(r) { \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE("return 0x%p", r); \
} \
DBUG_RETURN(r); \
}
#define TOKUDB_SHARE_DBUG_VOID_RETURN() { \
if (TOKUDB_UNLIKELY((tokudb::sysvars::debug & TOKUDB_DEBUG_RETURN) || \
(tokudb::sysvars::debug & TOKUDB_DEBUG_SHARE))) { \
TOKUDB_SHARE_TRACE("return"); \
} \
DBUG_VOID_RETURN; \
}
#define TOKUDB_DBUG_DUMP(s, p, len) \
{ \
TOKUDB_TRACE("%s", s); \
......
......@@ -1119,9 +1119,9 @@ void background_job_status_callback(
table->field[3]->store(type, strlen(type), system_charset_info);
table->field[4]->store(params, strlen(params), system_charset_info);
if (user_scheduled)
table->field[5]->store("USER", sizeof("USER"), system_charset_info);
table->field[5]->store("USER", strlen("USER"), system_charset_info);
else
table->field[5]->store("AUTO", sizeof("AUTO"), system_charset_info);
table->field[5]->store("AUTO", strlen("AUTO"), system_charset_info);
field_store_time_t(table->field[6], scheduled_time);
field_store_time_t(table->field[7], started_time);
......
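The fix in the last hunk is worth spelling out: for a string literal, sizeof counts the trailing NUL while strlen does not, so the old code stored a 5-byte value ("USER" plus NUL) into the scheduler column. A tiny self-contained check of the difference:

#include <cassert>
#include <cstring>

int main() {
    // sizeof includes the terminating NUL of a string literal;
    // strlen measures only the visible characters.
    static_assert(sizeof("USER") == 5, "array size includes the NUL");
    assert(strlen("USER") == 4);  // what field->store() should receive
    return 0;
}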