Commit e4df6e57 authored by Sergei Golubchik

Merge commit 'tokudb-engine/tokudb-7.5.6' into 5.5

parents 2f446f25 591288a5
@@ -25,7 +25,7 @@ IF (HAVE_WVLA)
ENDIF()
############################################
SET(TOKUDB_VERSION "tokudb-7.5.5")
SET(TOKUDB_VERSION "tokudb-7.5.6")
SET(TOKUDB_DEB_FILES "usr/lib/mysql/plugin/ha_tokudb.so\netc/mysql/conf.d/tokudb.cnf\nusr/bin/tokuftdump\nusr/share/doc/mariadb-server-5.5/README-TOKUDB\nusr/share/doc/mariadb-server-5.5/README.md" PARENT_SCOPE)
SET(USE_BDB OFF CACHE BOOL "")
MARK_AS_ADVANCED(BUILDNAME)
@@ -30,14 +30,14 @@ working MySQL or MariaDB with Tokutek patches, and with the TokuDB storage
engine, called `make.mysql.bash`. This script will download copies of the
needed source code from github and build everything.
To build MySQL 5.5.40 with TokuDB 7.5.3:
To build MySQL 5.5.41 with TokuDB 7.5.5:
```sh
scripts/make.mysql.bash --mysqlbuild=mysql-5.5.40-tokudb-7.5.3-linux-x86_64
scripts/make.mysql.bash --mysqlbuild=mysql-5.5.41-tokudb-7.5.5-linux-x86_64
```
To build MariaDB 5.5.40 with TokuDB 7.5.3:
To build MariaDB 5.5.41 with TokuDB 7.5.5:
```sh
scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.40-tokudb-7.5.3-linux-x86_64
scripts/make.mysql.bash --mysqlbuild=mariadb-5.5.41-tokudb-7.5.5-linux-x86_64
```
Before you start, make sure you have a C++11-compatible compiler (GCC >=
@@ -59,6 +59,7 @@ repositories, run this:
scripts/make.mysql.debug.env.bash
```
We use gcc from devtoolset-1.1 on CentOS 5.9 for builds.
Contribute
----------
@@ -3272,7 +3272,7 @@ void ha_tokudb::start_bulk_insert(ha_rows rows) {
lock_count = 0;
if ((rows == 0 || rows > 1) && share->try_table_lock) {
if (get_prelock_empty(thd) && may_table_be_empty(transaction)) {
if (get_prelock_empty(thd) && may_table_be_empty(transaction) && transaction != NULL) {
if (using_ignore || is_insert_ignore(thd) || thd->lex->duplicates != DUP_ERROR
|| table->s->next_number_key_offset) {
acquire_table_lock(transaction, lock_write);
@@ -3963,13 +3963,13 @@ int ha_tokudb::write_row(uchar * record) {
goto cleanup;
}
}
txn = create_sub_trans ? sub_trans : transaction;
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_HANDLER_TRACE("txn %p", txn);
}
if (tokudb_debug & TOKUDB_DEBUG_CHECK_KEY) {
test_row_packing(record,&prim_key,&row);
}
if (loader) {
error = loader->put(loader, &prim_key, &row);
if (error) {
@@ -4243,7 +4243,7 @@ int ha_tokudb::delete_row(const uchar * record) {
bool has_null;
THD* thd = ha_thd();
uint curr_num_DBs;
tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);;
tokudb_trx_data* trx = (tokudb_trx_data *) thd_get_ha_data(thd, tokudb_hton);
ha_statistic_increment(&SSV::ha_delete_count);
@@ -4268,10 +4268,14 @@ int ha_tokudb::delete_row(const uchar * record) {
goto cleanup;
}
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_HANDLER_TRACE("all %p stmt %p sub_sp_level %p transaction %p", trx->all, trx->stmt, trx->sub_sp_level, transaction);
}
error = db_env->del_multiple(
db_env,
share->key_file[primary_key],
transaction,
transaction,
&prim_key,
&row,
curr_num_DBs,
@@ -7177,12 +7181,15 @@ To rename the table, make sure no transactions touch the table.", from, to);
double ha_tokudb::scan_time() {
TOKUDB_HANDLER_DBUG_ENTER("");
double ret_val = (double)stats.records / 3;
if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
TOKUDB_HANDLER_TRACE("return %" PRIu64 " %f", (uint64_t) stats.records, ret_val);
}
DBUG_RETURN(ret_val);
}
double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
{
TOKUDB_HANDLER_DBUG_ENTER("");
TOKUDB_HANDLER_DBUG_ENTER("%u %u %" PRIu64, index, ranges, (uint64_t) rows);
double ret_val;
if (index == primary_key || key_is_clustering(&table->key_info[index])) {
ret_val = read_time(index, ranges, rows);
@@ -7200,6 +7207,9 @@ double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
(table->key_info[index].key_length +
ref_length) + 1);
ret_val = (rows + keys_per_block - 1)/ keys_per_block;
if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
TOKUDB_HANDLER_TRACE("return %f", ret_val);
}
DBUG_RETURN(ret_val);
}
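
The `keys_per_block` expression above is split across the hunk boundary; assuming the elided part matches the stock `handler::keyread_time` arithmetic (half a block of key entries per leaf, plus one), a minimal standalone sketch of the estimate follows. The 16 KB block size and all names are illustrative, not the engine's:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the keyread_time estimate: how many blocks must be read to
// fetch `rows` keys when keys_per_block key entries fit in half a block.
static double sketch_keyread_time(double block_size, double key_length,
                                  double ref_length, uint64_t rows) {
    double keys_per_block = block_size / 2.0 / (key_length + ref_length) + 1;
    // ceiling division, done in doubles as in the handler code above
    return ((double)rows + keys_per_block - 1) / keys_per_block;
}

int main() {
    // e.g. 16 KB blocks, 8-byte key, 8-byte row reference, 1M rows
    printf("%f\n", sketch_keyread_time(16384.0, 8.0, 8.0, 1000000));
    return 0;
}
```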
@@ -7220,7 +7230,7 @@ double ha_tokudb::read_time(
ha_rows rows
)
{
TOKUDB_HANDLER_DBUG_ENTER("");
TOKUDB_HANDLER_DBUG_ENTER("%u %u %" PRIu64, index, ranges, (uint64_t) rows);
double total_scan;
double ret_val;
bool is_primary = (index == primary_key);
@@ -7262,12 +7272,18 @@ double ha_tokudb::read_time(
ret_val = is_clustering ? ret_val + 0.00001 : ret_val;
cleanup:
if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
TOKUDB_HANDLER_TRACE("return %f", ret_val);
}
DBUG_RETURN(ret_val);
}
double ha_tokudb::index_only_read_time(uint keynr, double records) {
TOKUDB_HANDLER_DBUG_ENTER("");
TOKUDB_HANDLER_DBUG_ENTER("%u %f", keynr, records);
double ret_val = keyread_time(keynr, 1, (ha_rows)records);
if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
TOKUDB_HANDLER_TRACE("return %f", ret_val);
}
DBUG_RETURN(ret_val);
}
@@ -7342,7 +7358,7 @@ ha_rows ha_tokudb::records_in_range(uint keynr, key_range* start_key, key_range*
cleanup:
if (tokudb_debug & TOKUDB_DEBUG_RETURN) {
TOKUDB_HANDLER_TRACE("%" PRIu64 " %" PRIu64, (uint64_t) ret_val, rows);
TOKUDB_HANDLER_TRACE("return %" PRIu64 " %" PRIu64, (uint64_t) ret_val, rows);
}
DBUG_RETURN(ret_val);
}
@@ -156,18 +156,47 @@ int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
bool is_unique = false;
if (i == primary_key || (key_info->flags & HA_NOSAME))
is_unique = true;
uint64_t rows = 0;
uint64_t deleted_rows = 0;
int error = tokudb::analyze_card(share->key_file[i], txn, is_unique, num_key_parts, &rec_per_key[total_key_parts],
tokudb_cmp_dbt_key_parts, analyze_progress, &analyze_progress_extra);
tokudb_cmp_dbt_key_parts, analyze_progress, &analyze_progress_extra,
&rows, &deleted_rows);
sql_print_information("tokudb analyze %d %" PRIu64 " %" PRIu64, error, rows, deleted_rows);
if (error != 0 && error != ETIME) {
result = HA_ADMIN_FAILED;
} else {
// debug
if (tokudb_debug & TOKUDB_DEBUG_ANALYZE) {
TOKUDB_HANDLER_TRACE("%s.%s.%s",
table_share->db.str, table_share->table_name.str, i == primary_key ? "primary" : table_share->key_info[i].name);
for (uint j = 0; j < num_key_parts; j++)
TOKUDB_HANDLER_TRACE("%lu", rec_per_key[total_key_parts+j]);
}
}
if (error != 0 && rows == 0 && deleted_rows > 0) {
result = HA_ADMIN_FAILED;
}
double f = THDVAR(thd, analyze_delete_fraction);
if (result == HA_ADMIN_FAILED || (double) deleted_rows > f * (rows + deleted_rows)) {
char name[256]; int namelen;
namelen = snprintf(name, sizeof name, "%.*s.%.*s.%s",
(int) table_share->db.length, table_share->db.str,
(int) table_share->table_name.length, table_share->table_name.str,
key_name);
thd->protocol->prepare_for_resend();
thd->protocol->store(name, namelen, system_charset_info);
thd->protocol->store("analyze", 7, system_charset_info);
thd->protocol->store("info", 4, system_charset_info);
char rowmsg[256]; int rowmsglen;
rowmsglen = snprintf(rowmsg, sizeof rowmsg, "rows processed %" PRIu64 " rows deleted %" PRIu64, rows, deleted_rows);
thd->protocol->store(rowmsg, rowmsglen, system_charset_info);
thd->protocol->write();
sql_print_information("tokudb analyze on %.*s %.*s",
namelen, name, rowmsglen, rowmsg);
}
if (tokudb_debug & TOKUDB_DEBUG_ANALYZE) {
char name[256]; int namelen;
namelen = snprintf(name, sizeof name, "%.*s.%.*s.%s",
(int) table_share->db.length, table_share->db.str,
(int) table_share->table_name.length, table_share->table_name.str,
key_name);
TOKUDB_HANDLER_TRACE("%.*s rows %" PRIu64 " deleted %" PRIu64,
namelen, name, rows, deleted_rows);
for (uint j = 0; j < num_key_parts; j++)
TOKUDB_HANDLER_TRACE("%lu", rec_per_key[total_key_parts+j]);
}
total_key_parts += num_key_parts;
}
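
The new reporting threshold is easiest to see with numbers: a row is sent to the client when `deleted_rows > f * (rows + deleted_rows)`, so with the default `analyze_delete_fraction` of 1.0 the fraction test can never fire (deleted rows never exceed the total scanned) and only a failed analyze reports. A toy check, hypothetical code rather than the engine's:

```cpp
#include <cstdint>
#include <cstdio>

// True when the deleted fraction of the rows scanned by analyze exceeds f.
static bool report_deleted(double f, uint64_t rows, uint64_t deleted_rows) {
    return (double)deleted_rows > f * (double)(rows + deleted_rows);
}

int main() {
    printf("%d\n", report_deleted(1.0, 1000, 2000)); // 0: default f never fires
    printf("%d\n", report_deleted(0.5, 1000, 2000)); // 1: 2000 > 0.5 * 3000
    return 0;
}
```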
@@ -790,7 +790,7 @@ extern "C" enum durability_properties thd_get_durability_property(const MYSQL_TH
#endif
// Determine if an fsync is used when a transaction is committed.
static bool tokudb_fsync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn) {
static bool tokudb_sync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn) {
#if MYSQL_VERSION_ID >= 50600
// Check the client durability property which is set during 2PC
if (thd_get_durability_property(thd) == HA_IGNORE_DURABILITY)
@@ -801,17 +801,19 @@ static bool tokudb_fsync_on_commit(THD *thd, tokudb_trx_data *trx, DB_TXN *txn)
if (txn->is_prepared(txn) && mysql_bin_log.is_open())
return false;
#endif
if (tokudb_fsync_log_period > 0)
return false;
return THDVAR(thd, commit_sync) != 0;
}
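
A condensed restatement of the decision order in `tokudb_sync_on_commit` (the free function and its parameter names are illustrative, not the engine's API):

```cpp
// Returns true when commit should fsync the recovery log.
static bool sketch_sync_on_commit(bool ignore_durability,    // 2PC: binlog layer syncs
                                  bool prepared_with_binlog, // XA-prepared, binlog open
                                  unsigned fsync_log_period, // background fsync if > 0
                                  bool commit_sync) {        // session variable
    if (ignore_durability)
        return false; // durability is delegated during two-phase commit
    if (prepared_with_binlog)
        return false; // the prepare phase already made the txn durable
    if (fsync_log_period > 0)
        return false; // the periodic fsync thread covers the log
    return commit_sync;
}
```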
static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
TOKUDB_DBUG_ENTER("");
TOKUDB_DBUG_ENTER("%u", all);
DBUG_PRINT("trans", ("ending transaction %s", all ? "all" : "stmt"));
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
DB_TXN **txn = all ? &trx->all : &trx->stmt;
DB_TXN *this_txn = *txn;
if (this_txn) {
uint32_t syncflag = tokudb_fsync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
uint32_t syncflag = tokudb_sync_on_commit(thd, trx, this_txn) ? 0 : DB_TXN_NOSYNC;
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_TRACE("commit trx %u txn %p syncflag %u", all, this_txn, syncflag);
}
@@ -821,11 +823,11 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
commit_txn_with_progress(this_txn, syncflag, thd);
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_commit_after", DBUG_SUICIDE(););
if (this_txn == trx->sp_level) {
trx->sp_level = 0;
}
*txn = 0;
*txn = NULL;
trx->sub_sp_level = NULL;
if (this_txn == trx->sp_level || trx->all == NULL) {
trx->sp_level = NULL;
}
}
else if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_TRACE("nothing to commit %d", all);
@@ -835,7 +837,7 @@ static int tokudb_commit(handlerton * hton, THD * thd, bool all) {
}
static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
TOKUDB_DBUG_ENTER("");
TOKUDB_DBUG_ENTER("%u", all);
DBUG_PRINT("trans", ("aborting transaction %s", all ? "all" : "stmt"));
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
DB_TXN **txn = all ? &trx->all : &trx->stmt;
@@ -846,11 +848,11 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
}
tokudb_cleanup_handlers(trx, this_txn);
abort_txn_with_progress(this_txn, thd);
if (this_txn == trx->sp_level) {
trx->sp_level = 0;
}
*txn = 0;
*txn = NULL;
trx->sub_sp_level = NULL;
if (this_txn == trx->sp_level || trx->all == NULL) {
trx->sp_level = NULL;
}
}
else {
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
@@ -862,6 +864,13 @@ static int tokudb_rollback(handlerton * hton, THD * thd, bool all) {
}
#if TOKU_INCLUDE_XA
static bool tokudb_sync_on_prepare(void) {
// skip sync of log if fsync log period > 0
if (tokudb_fsync_log_period > 0)
return false;
else
return true;
}
static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
TOKUDB_DBUG_ENTER("");
@@ -876,6 +885,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
DB_TXN* txn = all ? trx->all : trx->stmt;
if (txn) {
uint32_t syncflag = tokudb_sync_on_prepare() ? 0 : DB_TXN_NOSYNC;
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_TRACE("doing txn prepare:%d:%p", all, txn);
}
@@ -884,7 +894,7 @@ static int tokudb_xa_prepare(handlerton* hton, THD* thd, bool all) {
thd_get_xid(thd, (MYSQL_XID*) &thd_xid);
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_prepare_before", DBUG_SUICIDE(););
r = txn->xa_prepare(txn, &thd_xid);
r = txn->xa_prepare(txn, &thd_xid, syncflag);
// test hook to induce a crash on a debug build
DBUG_EXECUTE_IF("tokudb_crash_prepare_after", DBUG_SUICIDE(););
}
@@ -949,7 +959,7 @@ static int tokudb_rollback_by_xid(handlerton* hton, XID* xid) {
#endif
static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) {
TOKUDB_DBUG_ENTER("");
TOKUDB_DBUG_ENTER("%p", savepoint);
int error;
SP_INFO save_info = (SP_INFO)savepoint;
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
@@ -970,6 +980,9 @@ static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) {
trx->sp_level = save_info->txn;
save_info->in_sub_stmt = false;
}
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_TRACE("begin txn %p", save_info->txn);
}
save_info->trx = trx;
error = 0;
cleanup:
@@ -977,7 +990,7 @@ static int tokudb_savepoint(handlerton * hton, THD * thd, void *savepoint) {
}
static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *savepoint) {
TOKUDB_DBUG_ENTER("");
TOKUDB_DBUG_ENTER("%p", savepoint);
int error;
SP_INFO save_info = (SP_INFO)savepoint;
DB_TXN* parent = NULL;
@@ -985,6 +998,9 @@ static int tokudb_rollback_to_savepoint(handlerton * hton, THD * thd, void *save
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
parent = txn_to_rollback->parent;
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_TRACE("rollback txn %p", txn_to_rollback);
}
if (!(error = txn_to_rollback->abort(txn_to_rollback))) {
if (save_info->in_sub_stmt) {
trx->sub_sp_level = parent;
@@ -998,24 +1014,27 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *save
}
static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoint) {
TOKUDB_DBUG_ENTER("");
int error;
TOKUDB_DBUG_ENTER("%p", savepoint);
int error = 0;
SP_INFO save_info = (SP_INFO)savepoint;
DB_TXN* parent = NULL;
DB_TXN* txn_to_commit = save_info->txn;
tokudb_trx_data *trx = (tokudb_trx_data *) thd_get_ha_data(thd, hton);
parent = txn_to_commit->parent;
if (!(error = txn_to_commit->commit(txn_to_commit, 0))) {
if (tokudb_debug & TOKUDB_DEBUG_TXN) {
TOKUDB_TRACE("commit txn %p", txn_to_commit);
}
DB_TXN *child = txn_to_commit->get_child(txn_to_commit);
if (child == NULL && !(error = txn_to_commit->commit(txn_to_commit, 0))) {
if (save_info->in_sub_stmt) {
trx->sub_sp_level = parent;
}
else {
trx->sp_level = parent;
}
save_info->txn = NULL;
}
save_info->txn = NULL;
TOKUDB_DBUG_RETURN(error);
}
@@ -1457,6 +1476,7 @@ static struct st_mysql_sys_var *tokudb_system_variables[] = {
MYSQL_SYSVAR(disable_slow_upsert),
#endif
MYSQL_SYSVAR(analyze_time),
MYSQL_SYSVAR(analyze_delete_fraction),
MYSQL_SYSVAR(fsync_log_period),
#if TOKU_INCLUDE_HANDLERTON_HANDLE_FATAL_SIGNAL
MYSQL_SYSVAR(gdb_path),
@@ -316,16 +316,9 @@ static MYSQL_THDVAR_BOOL(disable_slow_upsert,
);
#endif
static MYSQL_THDVAR_UINT(analyze_time,
0,
"analyze time",
NULL,
NULL,
5, // default
0, // min
~0U, // max
1 // blocksize
);
static MYSQL_THDVAR_UINT(analyze_time, 0, "analyze time (seconds)", NULL /*check*/, NULL /*update*/, 5 /*default*/, 0 /*min*/, ~0U /*max*/, 1 /*blocksize*/);
static MYSQL_THDVAR_DOUBLE(analyze_delete_fraction, 0, "fraction of rows allowed to be deleted", NULL /*check*/, NULL /*update*/, 1.0 /*def*/, 0 /*min*/, 1.0 /*max*/, 1);
static void tokudb_checkpoint_lock(THD * thd);
static void tokudb_checkpoint_unlock(THD * thd);
@@ -430,7 +423,7 @@ static int tokudb_killed_callback(void) {
return thd_killed(thd);
}
static bool tokudb_killed_thd_callback(void *extra) {
static bool tokudb_killed_thd_callback(void *extra, uint64_t deleted_rows) {
THD *thd = static_cast<THD *>(extra);
return thd_killed(thd) != 0;
}
@@ -10005,7 +10005,7 @@ insert into t values (9999,0);
commit;
explain select id from t where id>0 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t index_or_range PRIMARY PRIMARY 8 NULL # Using where; Using index_or_range
1 SIMPLE t range_or_index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t where id>0 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
drop table if exists ti;
create table ti (id int primary key) engine=innodb;
begin;
insert into ti values (0);
savepoint b;
insert into ti values (1);
savepoint a2;
insert into ti values (2);
savepoint b;
insert into ti values (3);
rollback to a2;
commit;
select * from ti;
id
0
1
drop table if exists tt;
create table tt (id int primary key) engine=tokudb;
begin;
insert into tt values (0);
savepoint b;
insert into tt values (1);
savepoint a2;
insert into tt values (2);
savepoint b;
insert into tt values (3);
rollback to a2;
commit;
select * from tt;
id
0
1
drop table ti,tt;
@@ -20,9 +20,18 @@ while ($i < $n) {
}
commit;
# TokuDB may do index or range scan on this. Both are ok
replace_column 9 #;
--replace_result index index_or_range range index_or_range
# the plan for the following query should be a range scan. about 1 in 10 times,
# the plan is an index scan instead. the scan type varies because the query optimizer
# is handed different row counts by tokudb::records_in_range, and the two cost
# estimates are very close to begin with, so sometimes the index scan comes out
# cheaper than the range scan.
#
# if a tokudb checkpoint occurs before this query is run, then the records_in_range
# function returns a larger than expected row estimate.
#
# column 4 is the join type (should be range or index)
# column 9 is the estimated key count
replace_column 4 range_or_index 9 #;
explain select id from t where id>0 limit 10;
replace_column 9 #;
# verify that duplicate savepoint names in innodb and tokudb work the same
source include/have_innodb.inc;
source include/have_tokudb.inc;
disable_warnings;
drop table if exists ti;
enable_warnings;
create table ti (id int primary key) engine=innodb;
begin;
insert into ti values (0);
savepoint b;
insert into ti values (1);
savepoint a2;
insert into ti values (2);
savepoint b;
insert into ti values (3);
rollback to a2;
commit;
select * from ti;
disable_warnings;
drop table if exists tt;
enable_warnings;
create table tt (id int primary key) engine=tokudb;
begin;
insert into tt values (0);
savepoint b;
insert into tt values (1);
savepoint a2;
insert into tt values (2);
savepoint b;
insert into tt values (3);
rollback to a2;
commit;
select * from tt;
drop table ti,tt;
@@ -117,6 +117,7 @@ elif [ $build_type = enterprise ] ; then
github_download Tokutek/tokudb-backup-plugin $(git_tree $git_tag $backup_tree) tokudb-backup-plugin
mv tokudb-backup-plugin plugin
github_download Tokutek/backup-enterprise $(git_tree $git_tag $backup_tree) backup-enterprise
rm -rf plugin/tokudb-backup-plugin/backup
mv backup-enterprise/backup plugin/tokudb-backup-plugin
rm -rf backup-enterprise
fi
@@ -62,7 +62,7 @@ tokudbengine=tokudb-engine
tokudbengine_tree=master
ftindex=ft-index
ftindex_tree=master
backup=backup-community
backup=tokudb-backup-plugin
backup_tree=master
cc=gcc
cxx=g++
@@ -119,9 +119,9 @@ if [ $? != 0 ] ; then exit 1; fi
ln -s ../../$tokudbengine/storage/tokudb tokudb
if [ $? != 0 ] ; then exit 1; fi
popd
pushd $mysql_tree
pushd $mysql_tree/plugin
if [ $? != 0 ] ; then exit 1; fi
ln -s ../$backup/backup toku_backup
ln -s ../../$backup $backup
if [ $? != 0 ] ; then exit 1; fi
popd
pushd $mysql_tree/scripts
@@ -218,15 +218,32 @@ namespace tokudb {
return error;
}
struct analyze_card_cursor_callback_extra {
int (*analyze_progress)(void *extra, uint64_t rows);
void *analyze_extra;
uint64_t *rows;
uint64_t *deleted_rows;
};
bool analyze_card_cursor_callback(void *extra, uint64_t deleted_rows) {
analyze_card_cursor_callback_extra *a_extra = static_cast<analyze_card_cursor_callback_extra *>(extra);
*a_extra->deleted_rows += deleted_rows;
int r = a_extra->analyze_progress(a_extra->analyze_extra, *a_extra->rows);
sql_print_information("tokudb analyze_card_cursor_callback %u %" PRIu64 " %" PRIu64, r, *a_extra->deleted_rows, deleted_rows);
return r != 0;
}
// Compute records per key for all key parts of the ith key of the table.
// For each key part, put records per key part in *rec_per_key_part[key_part_index].
// Returns 0 if success, otherwise an error number.
// TODO statistical dives into the FT
int analyze_card(DB *db, DB_TXN *txn, bool is_unique, uint64_t num_key_parts, uint64_t *rec_per_key_part,
int (*key_compare)(DB *, const DBT *, const DBT *, uint),
int (*analyze_progress)(void *extra, uint64_t rows), void *progress_extra) {
int (*analyze_progress)(void *extra, uint64_t rows), void *progress_extra,
uint64_t *return_rows, uint64_t *return_deleted_rows) {
int error = 0;
uint64_t rows = 0;
uint64_t deleted_rows = 0;
uint64_t unique_rows[num_key_parts];
if (is_unique && num_key_parts == 1) {
// don't compute for unique keys with a single part. we already know the answer.
@@ -235,6 +252,8 @@ namespace tokudb {
DBC *cursor = NULL;
error = db->cursor(db, txn, &cursor, 0);
if (error == 0) {
analyze_card_cursor_callback_extra e = { analyze_progress, progress_extra, &rows, &deleted_rows };
cursor->c_set_check_interrupt_callback(cursor, analyze_card_cursor_callback, &e);
for (uint64_t i = 0; i < num_key_parts; i++)
unique_rows[i] = 1;
// stop looking when the entire dictionary was analyzed, or a cap on execution time was reached, or the analyze was killed.
@@ -243,8 +262,8 @@ namespace tokudb {
while (1) {
error = cursor->c_get(cursor, &key, 0, DB_NEXT);
if (error != 0) {
if (error == DB_NOTFOUND)
error = 0; // eof is not an error
if (error == DB_NOTFOUND || error == TOKUDB_INTERRUPTED)
error = 0; // not an error
break;
}
rows++;
@@ -287,10 +306,12 @@ namespace tokudb {
}
}
// return cardinality
if (error == 0 || error == ETIME) {
for (uint64_t i = 0; i < num_key_parts; i++)
rec_per_key_part[i] = rows / unique_rows[i];
}
if (return_rows)
*return_rows = rows;
if (return_deleted_rows)
*return_deleted_rows = deleted_rows;
for (uint64_t i = 0; i < num_key_parts; i++)
rec_per_key_part[i] = rows / unique_rows[i];
return error;
}
}
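
To make the final cardinality computation concrete: `rec_per_key_part[i]` is just the row count divided by the number of distinct values seen for the first `i+1` key parts. A toy illustration with hypothetical counts, not the engine code:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    uint64_t rows = 100;
    // distinct prefixes seen while scanning: 20 for key part 1,
    // 100 for key parts (1,2) together (i.e. the two-part key is unique)
    uint64_t unique_rows[2] = {20, 100};
    for (int i = 0; i < 2; i++)
        printf("rec_per_key_part[%d] = %llu\n", i,
               (unsigned long long)(rows / unique_rows[i]));
    return 0;
}
```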