Commit 691e2b71 authored by Rich Prohaska, committed by Yoni Fogel

refs #6021 use ha_statistic_increment in the tokudb handlerton for table stats

git-svn-id: file:///svn/mysql/tokudb-engine/tokudb-engine@53248 c7de825b-a66e-492c-adef-691d508d4ae1
parent a0bb1fd4
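This change replaces direct updates of the per-session status counters with the ha_statistic_increment() helper on the handler base class. As a rough sketch of the pattern applied throughout ha_tokudb.cc, using the write_row() counter from the first hunk below (surrounding code omitted):

    // before: bump the session counter through the old statistic_increment macro
    statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);

    // after: let the handler base class update the matching status variable
    ha_statistic_increment(&SSV::ha_write_count);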
@@ -3805,7 +3805,7 @@ int ha_tokudb::write_row(uchar * record) {
     // some crap that needs to be done because MySQL does not properly abstract
     // this work away from us, namely filling in auto increment and setting auto timestamp
     //
-    statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_write_count);
 #if MYSQL_VERSION_ID < 50600
     if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) {
         table->timestamp_field->set_time();
@@ -4005,7 +4005,7 @@ int ha_tokudb::update_row(const uchar * old_row, uchar * new_row) {
     memset((void *) &old_prim_row, 0, sizeof(old_prim_row));
-    statistic_increment(table->in_use->status_var.ha_update_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_update_count);
 #if MYSQL_VERSION_ID < 50600
     if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) {
         table->timestamp_field->set_time();
@@ -4165,7 +4165,7 @@ int ha_tokudb::delete_row(const uchar * record) {
     uint curr_num_DBs;
     tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);;
-    statistic_increment(table->in_use->status_var.ha_delete_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_delete_count);
     //
     // grab reader lock on numDBs_lock
@@ -4643,7 +4643,7 @@ int ha_tokudb::read_full_row(uchar * buf) {
 //
 int ha_tokudb::index_next_same(uchar * buf, const uchar * key, uint keylen) {
     TOKUDB_DBUG_ENTER("ha_tokudb::index_next_same");
-    statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_next_count);
     DBT curr_key;
     DBT found_key;
@@ -4700,7 +4700,7 @@ int ha_tokudb::index_read(uchar * buf, const uchar * key, uint key_len, enum ha_
     HANDLE_INVALID_CURSOR();
-    table->in_use->status_var.ha_read_key_count++;
+    ha_statistic_increment(&SSV::ha_read_key_count);
     memset((void *) &row, 0, sizeof(row));
     info.ha = this;
@@ -5196,7 +5196,7 @@ cleanup:
 //
 int ha_tokudb::index_next(uchar * buf) {
     TOKUDB_DBUG_ENTER("ha_tokudb::index_next");
-    statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_next_count);
     int error = get_next(buf, 1);
     TOKUDB_DBUG_RETURN(error);
 }
@@ -5218,7 +5218,7 @@ int ha_tokudb::index_read_last(uchar * buf, const uchar * key, uint key_len) {
 //
 int ha_tokudb::index_prev(uchar * buf) {
     TOKUDB_DBUG_ENTER("ha_tokudb::index_prev");
-    statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_next_count);
     int error = get_next(buf, -1);
     TOKUDB_DBUG_RETURN(error);
 }
@@ -5242,7 +5242,7 @@ int ha_tokudb::index_first(uchar * buf) {
     tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);;
     HANDLE_INVALID_CURSOR();
-    statistic_increment(table->in_use->status_var.ha_read_first_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_first_count);
     info.ha = this;
     info.buf = buf;
@@ -5285,7 +5285,7 @@ int ha_tokudb::index_last(uchar * buf) {
     tokudb_trx_data* trx = (tokudb_trx_data *) thd_data_get(thd, tokudb_hton->slot);;
     HANDLE_INVALID_CURSOR();
-    statistic_increment(table->in_use->status_var.ha_read_last_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_last_count);
     info.ha = this;
     info.buf = buf;
@@ -5366,7 +5366,7 @@ int ha_tokudb::rnd_end() {
 //
 int ha_tokudb::rnd_next(uchar * buf) {
     TOKUDB_DBUG_ENTER("ha_tokudb::ha_tokudb::rnd_next");
-    statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_rnd_next_count);
     int error = get_next(buf, 1);
     TOKUDB_DBUG_RETURN(error);
 }
@@ -5445,7 +5445,7 @@ int ha_tokudb::rnd_pos(uchar * buf, uchar * pos) {
     DBT* key = get_pos(&db_pos, pos);
     unpack_entire_row = true;
-    statistic_increment(table->in_use->status_var.ha_read_rnd_count, &LOCK_status);
+    ha_statistic_increment(&SSV::ha_read_rnd_count);
     tokudb_active_index = MAX_KEY;
     info.ha = this;
...
@@ -454,7 +454,7 @@ public:
     int delete_table(const char *name);
     int rename_table(const char *from, const char *to);
     int optimize(THD * thd, HA_CHECK_OPT * check_opt);
-#if 0
+#if TOKU_INCLUDE_ANALYZE
     int analyze(THD * thd, HA_CHECK_OPT * check_opt);
 #endif
     int write_row(uchar * buf);
...
 #if TOKU_INCLUDE_ANALYZE
-//
-// This function will probably need to be redone from scratch
-// if we ever choose to implement it
-//
-int
-ha_tokudb::analyze(THD * thd, HA_CHECK_OPT * check_opt) {
-    uint i;
-    DB_BTREE_STAT *stat = 0;
-    DB_TXN_STAT *txn_stat_ptr = 0;
-    tokudb_trx_data *trx = (tokudb_trx_data *) thd->ha_data[tokudb_hton->slot];
-    DBUG_ASSERT(trx);
-    for (i = 0; i < table_share->keys; i++) {
-        if (stat) {
-            free(stat);
-            stat = 0;
-        }
-        if ((key_file[i]->stat) (key_file[i], trx->all, (void *) &stat, 0))
-            goto err;
-        share->rec_per_key[i] = (stat->bt_ndata / (stat->bt_nkeys ? stat->bt_nkeys : 1));
-    }
-    /* A hidden primary key is not in key_file[] */
-    if (hidden_primary_key) {
-        if (stat) {
-            free(stat);
-            stat = 0;
-        }
-        if ((file->stat) (file, trx->all, (void *) &stat, 0))
-            goto err;
-    }
-    pthread_mutex_lock(&share->mutex);
-    share->status |= STATUS_TOKUDB_ANALYZE; // Save status on close
-    share->version++; // Update stat in table
-    pthread_mutex_unlock(&share->mutex);
-    update_status(share, table); // Write status to file
-    if (stat)
-        free(stat);
-    return ((share->status & STATUS_TOKUDB_ANALYZE) ? HA_ADMIN_FAILED : HA_ADMIN_OK);
-err:
-    if (stat)
-        free(stat);
-    return HA_ADMIN_FAILED;
+int ha_tokudb::analyze(THD *thd, HA_CHECK_OPT *check_opt) {
+    TOKUDB_DBUG_ENTER("ha_tokudb::analyze");
+    TOKUDB_DBUG_RETURN(HA_ADMIN_OK);
 }
 #endif
-static int
-hot_poll_fun(void *extra, float progress) {
+static int hot_poll_fun(void *extra, float progress) {
     HOT_OPTIMIZE_CONTEXT context = (HOT_OPTIMIZE_CONTEXT)extra;
     if (context->thd->killed) {
         sprintf(context->write_status_msg, "The process has been killed, aborting hot optimize.");
@@ -71,8 +32,7 @@ hot_poll_fun(void *extra, float progress) {
 volatile int ha_tokudb_optimize_wait = 0; // debug
 // flatten all DB's in this table, to do so, peform hot optimize on each db
-int
-ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
+int ha_tokudb::optimize(THD * thd, HA_CHECK_OPT * check_opt) {
     TOKUDB_DBUG_ENTER("ha_tokudb::optimize");
     while (ha_tokudb_optimize_wait) sleep(1); // debug
@@ -117,13 +77,11 @@ cleanup:
     TOKUDB_DBUG_RETURN(error);
 }
 struct check_context {
     THD *thd;
 };
-static int
-ha_tokudb_check_progress(void *extra, float progress) {
+static int ha_tokudb_check_progress(void *extra, float progress) {
     struct check_context *context = (struct check_context *) extra;
     int result = 0;
     if (context->thd->killed)
@@ -131,8 +89,7 @@ ha_tokudb_check_progress(void *extra, float progress) {
     return result;
 }
-static void
-ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) {
+static void ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) {
     if (thd->vio_ok()) {
         char tablename[256];
         snprintf(tablename, sizeof tablename, "%s.%s", table->s->db.str, table->s->table_name.str);
@@ -148,8 +105,7 @@ ha_tokudb_check_info(THD *thd, TABLE *table, const char *msg) {
 volatile int ha_tokudb_check_verbose = 0; // debug
 volatile int ha_tokudb_check_wait = 0; // debug
-int
-ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) {
+int ha_tokudb::check(THD *thd, HA_CHECK_OPT *check_opt) {
     TOKUDB_DBUG_ENTER("check");
     while (ha_tokudb_check_wait) sleep(1); // debug
...
@@ -46,6 +46,7 @@
 #define TOKU_PARTITION_WRITE_FRM_DATA 1
 #define TOKU_INCLUDE_WRITE_FRM_DATA 1
 #define TOKU_INCLUDE_UPSERT 1
+#define TOKU_INCLUDE_ANALYZE 0
 #elif 50500 <= MYSQL_VERSION_ID && MYSQL_VERSION_ID <= 50599
 #define TOKU_INCLUDE_ALTER_56 1
...
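Taken together with the header hunk above, which guards the analyze() declaration with #if TOKU_INCLUDE_ANALYZE instead of #if 0, this new define keeps the stubbed ha_tokudb::analyze() compiled out in this branch of the MYSQL_VERSION_ID #if chain. Presumably, enabling the stub would only require flipping the macro, roughly:

    #define TOKU_INCLUDE_ANALYZE 1   // hypothetical: compile in the analyze() stub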