Commit 84df6cc1 authored by Zardosht Kasheff, committed by Yoni Fogel

[t:1980], [t:1981], merge back to mainline

git-svn-id: file:///svn/mysql/tokudb-engine/src@14493 c7de825b-a66e-492c-adef-691d508d4ae1
parent 40edfbb8
@@ -230,51 +230,6 @@ static int free_share(TOKUDB_SHARE * share, bool mutex_is_locked) {
return result;
}
static int get_name_length(const char *name) {
int n = 0;
const char *newname = name;
if (tokudb_data_dir) {
n += strlen(tokudb_data_dir) + 1;
if (strncmp("./", name, 2) == 0)
newname = name + 2;
}
n += strlen(newname);
n += strlen(ha_tokudb_ext);
return n;
}
//
// maximum length of a dictionary name, such as key-NAME
// NAME_CHAR_LEN is the max length of the key name, plus an upper bound of 10 for the "key-" prefix
//
#define MAX_DICT_NAME_LEN NAME_CHAR_LEN + 10
//
// returns maximum length of path to a dictionary
//
static int get_max_dict_name_path_length(const char *tablename) {
int n = 0;
n += get_name_length(tablename);
n += 1; //for the '/'
n += MAX_DICT_NAME_LEN;
n += strlen(ha_tokudb_ext);
return n;
}
static void make_name(char *newname, const char *tablename, const char *dictname) {
const char *newtablename = tablename;
char *nn = newname;
if (tokudb_data_dir) {
nn += sprintf(nn, "%s/", tokudb_data_dir);
if (strncmp("./", tablename, 2) == 0)
newtablename = tablename + 2;
}
nn += sprintf(nn, "%s%s", newtablename, ha_tokudb_ext);
if (dictname)
nn += sprintf(nn, "/%s%s", dictname, ha_tokudb_ext);
}
#define HANDLE_INVALID_CURSOR() \
if (cursor == NULL) { \
@@ -366,6 +321,15 @@ static int smart_dbt_do_nothing (DBT const *key, DBT const *row, void *context)
return 0;
}
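//
// getf_set callback used for the metadata dictionary below: it copies the row
// into the DBT passed as context (a freshly my_malloc'ed buffer), so the
// caller owns the value once the query returns.
//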
static int smart_dbt_metacallback (DBT const *key, DBT const *row, void *context) {
DBT* val = (DBT *)context;
val->data = my_malloc(row->size, MYF(MY_WME|MY_ZEROFILL));
if (val->data == NULL) return ENOMEM;
memcpy(val->data, row->data, row->size);
val->size = row->size;
return 0;
}
static int
smart_dbt_callback_rowread_ptquery (DBT const *key, DBT const *row, void *context) {
@@ -848,6 +812,183 @@ const uchar* unpack_toku_field_blob(
}
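//
// Helpers for the metadata dictionary: each record's key is the full table
// name (including the trailing NUL) and the value is a single byte recording
// whether the table uses a hidden primary key. Every helper serializes on
// tokudb_meta_mutex and does its work inside its own transaction.
//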
static int add_table_to_metadata(const char *name, TABLE* table) {
int error = 0;
DBT key;
DBT val;
DB_TXN* txn = NULL;
uchar hidden_primary_key = (table->s->primary_key >= MAX_KEY);
pthread_mutex_lock(&tokudb_meta_mutex);
error = db_env->txn_begin(db_env, 0, &txn, 0);
if (error) {
goto cleanup;
}
bzero((void *)&key, sizeof(key));
bzero((void *)&val, sizeof(val));
key.data = (void *)name;
key.size = strlen(name) + 1;
val.data = &hidden_primary_key;
val.size = sizeof(hidden_primary_key);
error = metadata_db->put(
metadata_db,
txn,
&key,
&val,
DB_YESOVERWRITE
);
cleanup:
if (txn) {
int r = !error ? txn->commit(txn,0) : txn->abort(txn);
assert(!r);
}
pthread_mutex_unlock(&tokudb_meta_mutex);
return error;
}
static int drop_table_from_metadata(const char *name) {
int error = 0;
DBT key;
DBT data;
DB_TXN* txn = NULL;
pthread_mutex_lock(&tokudb_meta_mutex);
error = db_env->txn_begin(db_env, 0, &txn, 0);
if (error) {
goto cleanup;
}
bzero((void *)&key, sizeof(key));
bzero((void *)&data, sizeof(data));
key.data = (void *)name;
key.size = strlen(name) + 1;
error = metadata_db->del(
metadata_db,
txn,
&key ,
DB_DELETE_ANY
);
cleanup:
if (txn) {
int r = !error ? txn->commit(txn,0) : txn->abort(txn);
assert(!r);
}
pthread_mutex_unlock(&tokudb_meta_mutex);
return error;
}
static int rename_table_in_metadata(const char *from, const char *to) {
int error = 0;
DBT from_key;
DBT to_key;
DBT val;
DB_TXN* txn = NULL;
pthread_mutex_lock(&tokudb_meta_mutex);
error = db_env->txn_begin(db_env, 0, &txn, 0);
if (error) {
goto cleanup;
}
bzero((void *)&from_key, sizeof(from_key));
bzero((void *)&to_key, sizeof(to_key));
bzero((void *)&val, sizeof(val));
from_key.data = (void *)from;
from_key.size = strlen(from) + 1;
to_key.data = (void *)to;
to_key.size = strlen(to) + 1;
error = metadata_db->getf_set(
metadata_db,
txn,
0,
&from_key,
smart_dbt_metacallback,
&val
);
if (error) {
goto cleanup;
}
error = metadata_db->put(
metadata_db,
txn,
&to_key,
&val,
DB_YESOVERWRITE
);
if (error) {
goto cleanup;
}
error = metadata_db->del(
metadata_db,
txn,
&from_key,
DB_DELETE_ANY
);
if (error) {
goto cleanup;
}
error = 0;
cleanup:
if (txn) {
int r = !error ? txn->commit(txn,0) : txn->abort(txn);
assert(!r);
}
my_free(val.data, MYF(MY_ALLOW_ZERO_PTR));
pthread_mutex_unlock(&tokudb_meta_mutex);
return error;
}
static int check_table_in_metadata(const char *name, bool* table_found) {
int error = 0;
DBT key;
DB_TXN* txn = NULL;
pthread_mutex_lock(&tokudb_meta_mutex);
error = db_env->txn_begin(db_env, 0, &txn, 0);
if (error) {
goto cleanup;
}
bzero((void *)&key, sizeof(key));
key.data = (void *)name;
key.size = strlen(name) + 1;
error = metadata_db->getf_set(
metadata_db,
txn,
0,
&key,
smart_dbt_do_nothing,
NULL
);
if (error == 0) {
*table_found = true;
}
else if (error == DB_NOTFOUND){
*table_found = false;
error = 0;
}
cleanup:
if (txn) {
error = txn->commit(txn,0);
}
pthread_mutex_unlock(&tokudb_meta_mutex);
return error;
}
ha_tokudb::ha_tokudb(handlerton * hton, TABLE_SHARE * table_arg):handler(hton, table_arg)
// flags defined in sql\handler.h
{
@@ -1033,10 +1174,24 @@ int ha_tokudb::initialize_share(
u_int64_t num_rows = 0;
u_int32_t curr_blob_field_index = 0;
u_int32_t max_var_bytes = 0;
bool table_exists;
uint open_flags = (mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD;
open_flags += DB_AUTO_COMMIT;
THD* thd = ha_thd();
DBUG_PRINT("info", ("share->use_count %u", share->use_count));
table_exists = true;
error = check_table_in_metadata(name, &table_exists);
if (error) {
goto exit;
}
if (!table_exists) {
sql_print_error("table %s does not exist in metadata, was it moved from someplace else? Not opening table", name);
error = HA_ADMIN_FAILED;
goto exit;
}
newname = (char *)my_malloc(
get_max_dict_name_path_length(name),
MYF(MY_WME|MY_ZEROFILL)
@@ -4331,7 +4486,7 @@ THR_LOCK_DATA **ha_tokudb::store_lock(THD * thd, THR_LOCK_DATA ** to, enum thr_l
if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) {
/* If we are not doing a LOCK TABLE, then allow multiple writers */
if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && lock_type <= TL_WRITE) &&
!thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE) {
!thd->in_lock_tables && thd_sql_command(thd) != SQLCOM_TRUNCATE && !thd_tablespace_op(thd)) {
lock_type = TL_WRITE_ALLOW_WRITE;
}
lock.type = lock_type;
@@ -4702,6 +4857,12 @@ int ha_tokudb::create(const char *name, TABLE * form, HA_CREATE_INFO * create_in
}
}
error = add_table_to_metadata(name, form);
if (error) {
goto cleanup;
}
error = 0;
cleanup:
if (status_block != NULL) {
status_block->close(status_block, 0);
@@ -4709,12 +4870,28 @@ cleanup:
if (error && dir_path_made) {
rmall(dirname);
}
if (error) {
drop_table_from_metadata(name);
}
my_free(newname, MYF(MY_ALLOW_ZERO_PTR));
my_free(dirname, MYF(MY_ALLOW_ZERO_PTR));
my_free(row_desc_buff, MYF(MY_ALLOW_ZERO_PTR));
TOKUDB_DBUG_RETURN(error);
}
int ha_tokudb::discard_or_import_tablespace(my_bool discard) {
/*
if (discard) {
my_errno=HA_ERR_WRONG_COMMAND;
return my_errno;
}
return add_table_to_metadata(share->table_name);
*/
my_errno=HA_ERR_WRONG_COMMAND;
return my_errno;
}
//
// Drops table
// Parameters:
@@ -4725,9 +4902,14 @@ cleanup:
//
int ha_tokudb::delete_table(const char *name) {
TOKUDB_DBUG_ENTER("ha_tokudb::delete_table");
int error;
// remove all of the dictionaries in the table directory
char* newname = NULL;
// remove all of the dictionaries in the table directory
error = drop_table_from_metadata(name);
if (error) {
goto cleanup;
}
newname = (char *)my_malloc(get_max_dict_name_path_length(name), MYF(MY_WME|MY_ZEROFILL));
if (newname == NULL) {
error = ENOMEM;
@@ -4785,6 +4967,8 @@ int ha_tokudb::rename_table(const char *from, const char *to) {
error = my_errno = errno;
}
rename_table_in_metadata(from, to);
cleanup:
{
int r = db_env->checkpointing_resume(db_env);
......
@@ -415,6 +415,7 @@ public:
// delete all rows from the table
// effect: all dictionaries, including the main and indexes, should be empty
int discard_or_import_tablespace(my_bool discard);
int delete_all_rows();
void extract_hidden_primary_key(uint keynr, DBT const *row, DBT const *found_key);
void read_key_only(uchar * buf, uint keynr, DBT const *row, DBT const *found_key);
......
@@ -1394,7 +1394,17 @@ exit:
}
int tokudb_cmp_dbt_key(DB *file, const DBT *keya, const DBT *keyb) {
int cmp = tokudb_compare_two_keys(
int cmp;
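//
// dictionaries with an empty descriptor (for example the metadata dictionary
// opened in tokudb_init_func, which never sets one in this patch) are compared
// as raw bytes; ties on the common prefix are broken by key length
//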
if (file->descriptor->size == 0) {
int num_bytes_cmp = keya->size < keyb->size ?
keya->size : keyb->size;
cmp = memcmp(keya->data,keyb->data,num_bytes_cmp);
if (cmp == 0 && (keya->size != keyb->size)) {
cmp = keya->size < keyb->size ? 1 : -1;
}
}
else {
cmp = tokudb_compare_two_keys(
keya->data,
keya->size,
keyb->data,
@@ -1403,6 +1413,7 @@ int tokudb_cmp_dbt_key(DB *file, const DBT *keya, const DBT *keyb) {
(*(u_int32_t *)file->descriptor->data) - 4,
false
);
}
return cmp;
}
......
@@ -10,6 +10,11 @@ extern "C" {
extern ulong tokudb_debug;
//
// maximum length of a dictionary name, such as key-NAME
// NAME_CHAR_LEN is the max length of the key name, plus an upper bound of 10 for the "key-" prefix
//
#define MAX_DICT_NAME_LEN NAME_CHAR_LEN + 10
// QQQ how to tune these?
@@ -86,6 +91,47 @@ typedef struct st_tokudb_trx_data {
HA_TOKU_ISO_LEVEL iso_level;
} tokudb_trx_data;
extern char *tokudb_data_dir;
extern const char *ha_tokudb_ext;
static int get_name_length(const char *name) {
int n = 0;
const char *newname = name;
if (tokudb_data_dir) {
n += strlen(tokudb_data_dir) + 1;
if (strncmp("./", name, 2) == 0)
newname = name + 2;
}
n += strlen(newname);
n += strlen(ha_tokudb_ext);
return n;
}
//
// returns maximum length of path to a dictionary
//
static int get_max_dict_name_path_length(const char *tablename) {
int n = 0;
n += get_name_length(tablename);
n += 1; //for the '/'
n += MAX_DICT_NAME_LEN;
n += strlen(ha_tokudb_ext);
return n;
}
static void make_name(char *newname, const char *tablename, const char *dictname) {
const char *newtablename = tablename;
char *nn = newname;
if (tokudb_data_dir) {
nn += sprintf(nn, "%s/", tokudb_data_dir);
if (strncmp("./", tablename, 2) == 0)
newtablename = tablename + 2;
}
nn += sprintf(nn, "%s%s", newtablename, ha_tokudb_ext);
if (dictname)
nn += sprintf(nn, "/%s%s", dictname, ha_tokudb_ext);
}
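//
// Usage sketch (illustrative only, not part of the handler code): the helpers
// above compose as follows; "./test/t1", "/data", and the buffer name are
// example values, while the "main" dictionary name is the one the handler
// itself uses elsewhere.
//
//   char* buf = (char *)my_malloc(get_max_dict_name_path_length("./test/t1"), MYF(MY_WME|MY_ZEROFILL));
//   if (buf != NULL) {
//       // with tokudb_data_dir == "/data" this yields "/data/test/t1.tokudb/main.tokudb"
//       make_name(buf, "./test/t1", "main");
//       my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
//   }
//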
#endif
@@ -28,6 +28,7 @@ extern "C" {
#undef HAVE_DTRACE
#undef _DTRACE_VERSION
#define TOKU_METADB_NAME ".\\tokudb_meta.tokudb"
static inline void *thd_data_get(THD *thd, int slot) {
#if MYSQL_VERSION_ID <= 50123
@@ -47,7 +48,6 @@ static inline void thd_data_set(THD *thd, int slot, void *data) {
static uchar *tokudb_get_key(TOKUDB_SHARE * share, size_t * length, my_bool not_used __attribute__ ((unused))) {
*length = share->table_name_length;
return (uchar *) share->table_name;
@@ -78,8 +78,10 @@ const char *ha_tokudb_ext = ".tokudb";
char *tokudb_data_dir;
ulong tokudb_debug;
DB_ENV *db_env;
DB* metadata_db;
HASH tokudb_open_tables;
pthread_mutex_t tokudb_mutex;
pthread_mutex_t tokudb_meta_mutex;
//my_bool tokudb_shared_data = FALSE;
@@ -125,10 +127,13 @@ static int tokudb_init_func(void *p) {
goto error;
}
#endif
db_env = NULL;
metadata_db = NULL;
tokudb_hton = (handlerton *) p;
VOID(pthread_mutex_init(&tokudb_mutex, MY_MUTEX_INIT_FAST));
VOID(pthread_mutex_init(&tokudb_meta_mutex, MY_MUTEX_INIT_FAST));
(void) hash_init(&tokudb_open_tables, system_charset_info, 32, 0, 0, (hash_get_key) tokudb_get_key, 0, 0);
tokudb_hton->state = SHOW_OPTION_YES;
@@ -287,9 +292,41 @@ static int tokudb_init_func(void *p) {
assert(!r);
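//
// bootstrap the metadata dictionary: try to open tokudb_meta.tokudb with
// DB_AUTO_COMMIT first; if that fails, create it, then close and reopen it
//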
r = db_create(&metadata_db, db_env, 0);
if (r) {
DBUG_PRINT("info", ("failed to create metadata db %d\n", r));
goto error;
}
metadata_db->set_bt_compare(metadata_db, tokudb_cmp_dbt_key);
r= metadata_db->open(metadata_db, 0, TOKU_METADB_NAME, NULL, DB_BTREE, DB_THREAD|DB_AUTO_COMMIT, 0);
if (r) {
sql_print_error("No metadata table exists, so creating it");
r= metadata_db->open(metadata_db, NULL, TOKU_METADB_NAME, NULL, DB_BTREE, DB_THREAD | DB_CREATE, my_umask);
if (r) {
goto error;
}
metadata_db->close(metadata_db,0);
r = db_create(&metadata_db, db_env, 0);
if (r) {
DBUG_PRINT("info", ("failed to create metadata db %d\n", r));
goto error;
}
metadata_db->set_bt_compare(metadata_db, tokudb_cmp_dbt_key);
r= metadata_db->open(metadata_db, 0, TOKU_METADB_NAME, NULL, DB_BTREE, DB_THREAD|DB_AUTO_COMMIT, 0);
if (r) {
goto error;
}
}
DBUG_RETURN(FALSE);
error:
if (metadata_db) {
metadata_db->close(metadata_db, 0);
}
if (db_env) {
db_env->close(db_env, 0);
db_env = 0;
@@ -305,6 +342,7 @@ static int tokudb_done_func(void *p) {
error = 1;
hash_free(&tokudb_open_tables);
pthread_mutex_destroy(&tokudb_mutex);
pthread_mutex_destroy(&tokudb_meta_mutex);
#if defined(_WIN32)
toku_ydb_destroy();
#endif
@@ -320,6 +358,9 @@ static handler *tokudb_create_handler(handlerton * hton, TABLE_SHARE * table, ME
int tokudb_end(handlerton * hton, ha_panic_function type) {
TOKUDB_DBUG_ENTER("tokudb_end");
int error = 0;
if (metadata_db) {
metadata_db->close(metadata_db, 0);
}
if (db_env) {
if (tokudb_init_flags & DB_INIT_LOG)
tokudb_cleanup_log_files();
@@ -468,6 +509,129 @@ static int tokudb_release_savepoint(handlerton * hton, THD * thd, void *savepoin
#endif
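//
// SHOW ENGINE tokudb STATUS: walk the metadata dictionary, open each table's
// "main" dictionary, and report the summed data size (bt_dsize minus the space
// taken by hidden primary keys or by the per-key infinity bytes)
//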
static bool tokudb_show_engine_status(THD * thd, stat_print_fn * stat_print) {
TOKUDB_DBUG_ENTER("tokudb_show_engine_status");
int error;
u_int64_t num_bytes_in_db = 0;
DB* curr_db = NULL;
DB_TXN* txn = NULL;
DBC* tmp_cursor = NULL;
DBT curr_key;
DBT curr_val;
char data_amount_msg[50] = {0};
memset(&curr_key, 0, sizeof curr_key);
memset(&curr_val, 0, sizeof curr_val);
pthread_mutex_lock(&tokudb_meta_mutex);
error = db_env->txn_begin(db_env, 0, &txn, 0);
if (error) {
goto cleanup;
}
error = metadata_db->cursor(metadata_db, txn, &tmp_cursor, 0);
if (error) {
goto cleanup;
}
while (error == 0) {
//
// do not need this to be super fast, so use old simple API
//
error = tmp_cursor->c_get(
tmp_cursor,
&curr_key,
&curr_val,
DB_NEXT
);
if (!error) {
char* name = (char *)curr_key.data;
char* newname = NULL;
char name_buff[FN_REFLEN];
char* fn_ret = NULL;
u_int64_t curr_num_bytes = 0;
DB_BTREE_STAT64 dict_stats;
newname = (char *)my_malloc(
get_max_dict_name_path_length(name),
MYF(MY_WME|MY_ZEROFILL)
);
if (newname == NULL) {
error = ENOMEM;
goto cleanup;
}
make_name(newname, name, "main");
fn_ret = fn_format(name_buff, newname, "", 0, MY_UNPACK_FILENAME|MY_SAFE_PATH);
error = db_create(&curr_db, db_env, 0);
if (error) { goto cleanup; }
error = curr_db->open(curr_db, 0, name_buff, NULL, DB_BTREE, DB_THREAD, 0);
if (error) { goto cleanup; }
error = curr_db->stat64(
curr_db,
txn,
&dict_stats
);
if (error) { goto cleanup; }
DBUG_PRINT("info", ("size of %s is %lld hidden_primary_key %d!!\n", name_buff, dict_stats.bt_dsize, *(uchar *)curr_val.data));
curr_num_bytes = dict_stats.bt_dsize;
if (*(uchar *)curr_val.data) {
//
// in this case, we have a hidden primary key, do not
// want to report space taken up by the hidden primary key to the user
//
u_int64_t hpk_space = TOKUDB_HIDDEN_PRIMARY_KEY_LENGTH*dict_stats.bt_ndata;
curr_num_bytes = (hpk_space > curr_num_bytes) ? 0 : curr_num_bytes - hpk_space;
}
else {
//
// one infinity byte per key needs to be subtracted
//
u_int64_t inf_byte_space = dict_stats.bt_ndata;
curr_num_bytes = (inf_byte_space > curr_num_bytes) ? 0 : curr_num_bytes - inf_byte_space;
}
num_bytes_in_db += curr_num_bytes;
curr_db->close(curr_db, 0);
curr_db = NULL;
my_free(newname,MYF(MY_ALLOW_ZERO_PTR));
}
}
sprintf(data_amount_msg, "Number of bytes in database: %lld", num_bytes_in_db);
stat_print(
thd,
tokudb_hton_name,
tokudb_hton_name_length,
"Data in tables",
strlen("Data in tables"),
data_amount_msg,
strlen(data_amount_msg)
);
error = 0;
cleanup:
if (curr_db) {
curr_db->close(curr_db, 0);
}
if (tmp_cursor) {
tmp_cursor->c_close(tmp_cursor);
}
if (txn) {
txn->commit(txn, 0);
}
if (error) {
printf("got an error %d in show engine status\n", error);
}
pthread_mutex_unlock(&tokudb_meta_mutex);
TOKUDB_DBUG_RETURN(error);
}
static bool tokudb_show_logs(THD * thd, stat_print_fn * stat_print) {
TOKUDB_DBUG_ENTER("tokudb_show_logs");
char **all_logs, **free_logs, **a, **f;
@@ -512,11 +676,16 @@ static bool tokudb_show_logs(THD * thd, stat_print_fn * stat_print) {
bool tokudb_show_status(handlerton * hton, THD * thd, stat_print_fn * stat_print, enum ha_stat_type stat_type) {
switch (stat_type) {
case HA_ENGINE_STATUS:
return tokudb_show_engine_status(thd, stat_print);
break;
case HA_ENGINE_LOGS:
return tokudb_show_logs(thd, stat_print);
break;
default:
return FALSE;
break;
}
return FALSE;
}
static void tokudb_print_error(const DB_ENV * db_env, const char *db_errpfx, const char *buffer) {
......
@@ -6,9 +6,8 @@
extern handlerton *tokudb_hton;
extern const char *ha_tokudb_ext;
extern char *tokudb_data_dir;
extern DB_ENV *db_env;
extern DB *metadata_db;
// thread variables
@@ -16,6 +15,7 @@ extern DB_ENV *db_env;
extern HASH tokudb_open_tables;
extern pthread_mutex_t tokudb_mutex;
extern pthread_mutex_t tokudb_meta_mutex;
......