Commit c5fb1295 authored by Zardosht Kasheff, committed by Yoni Fogel

refs #4606, remove ct->size_max

git-svn-id: file:///svn/toku/tokudb@45698 c7de825b-a66e-492c-adef-691d508d4ae1
parent a3b24571
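For context, the field this commit deletes was a high-water mark: alongside size_current, the cachetable kept size_max and bumped it whenever the running total set a new peak. The following is an illustrative, self-contained toy sketch of that bookkeeping (simplified stand-ins for the real struct cachetable and PAIR_ATTR; locking omitted), not the actual TokuDB code:

#include <assert.h>
#include <stdint.h>

struct toy_cachetable {
    int64_t size_current;  // running total of cached pair sizes
    int64_t size_max;      // peak value size_current ever reached (removed by this commit)
};

static void toy_add_pair(struct toy_cachetable *ct, int64_t pair_size) {
    ct->size_current += pair_size;
    if (ct->size_current > ct->size_max) {
        ct->size_max = ct->size_current;   // record the new high-water mark
    }
}

int main(void) {
    struct toy_cachetable ct = { 0, 0 };
    toy_add_pair(&ct, 100);
    toy_add_pair(&ct, 50);
    ct.size_current -= 120;                // simulate an eviction shrinking the total
    toy_add_pair(&ct, 10);
    assert(ct.size_current == 40);
    assert(ct.size_max == 150);            // the peak survives later shrinkage
    return 0;
}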
@@ -58,7 +58,6 @@ status_init(void) {
     STATUS_INIT(CT_PREFETCHES, UINT64, "prefetches");
     STATUS_INIT(CT_SIZE_CURRENT, UINT64, "size current");
     STATUS_INIT(CT_SIZE_LIMIT, UINT64, "size limit");
-    STATUS_INIT(CT_SIZE_MAX, UINT64, "size max");
     STATUS_INIT(CT_SIZE_WRITING, UINT64, "size writing");
     STATUS_INIT(CT_SIZE_NONLEAF, UINT64, "size nonleaf");
     STATUS_INIT(CT_SIZE_LEAF, UINT64, "size leaf");
@@ -141,7 +140,6 @@ struct cachetable {
     int64_t size_current;            // the sum of the sizes of the pairs in the cachetable
     int64_t size_limit;              // the limit to the sum of the pair sizes
     int64_t size_evicting;           // the sum of the sizes of the pairs being written
-    int64_t size_max;                // high water mark of size_current (max value size_current ever had)
     TOKULOGGER logger;
     toku_mutex_t mutex;              // coarse lock that protects the cachetable, the cachefiles, and the pairs
@@ -194,7 +192,6 @@ toku_cachetable_get_status(CACHETABLE ct, CACHETABLE_STATUS statp) {
     STATUS_VALUE(CT_PREFETCHES) = cachetable_prefetches;
     STATUS_VALUE(CT_SIZE_CURRENT) = ct->size_current;
     STATUS_VALUE(CT_SIZE_LIMIT) = ct->size_limit;
-    STATUS_VALUE(CT_SIZE_MAX) = ct->size_max;
     STATUS_VALUE(CT_SIZE_WRITING) = ct->size_evicting;
     STATUS_VALUE(CT_SIZE_NONLEAF) = ct->size_nonleaf;
     STATUS_VALUE(CT_SIZE_LEAF) = ct->size_leaf;
@@ -845,9 +842,6 @@ static void
 cachetable_add_pair_attr(CACHETABLE ct, PAIR_ATTR attr) {
     assert(attr.is_valid);
     ct->size_current += attr.size;
-    if (ct->size_current > ct->size_max) {
-        ct->size_max = ct->size_current;
-    }
     ct->size_nonleaf += attr.nonleaf_size;
     ct->size_leaf += attr.leaf_size;
     ct->size_rollback += attr.rollback_size;
@@ -3107,7 +3101,7 @@ void toku_cachetable_print_state (CACHETABLE ct) {
     cachetable_unlock(ct);
 }
-void toku_cachetable_get_state (CACHETABLE ct, int *num_entries_ptr, int *hash_size_ptr, long *size_current_ptr, long *size_limit_ptr, int64_t *size_max_ptr) {
+void toku_cachetable_get_state (CACHETABLE ct, int *num_entries_ptr, int *hash_size_ptr, long *size_current_ptr, long *size_limit_ptr) {
     cachetable_lock(ct);
     if (num_entries_ptr)
         *num_entries_ptr = ct->n_in_table;
@@ -3117,8 +3111,6 @@ void toku_cachetable_get_state (CACHETABLE ct, int *num_entries_ptr, int *hash_s
         *size_current_ptr = ct->size_current;
     if (size_limit_ptr)
         *size_limit_ptr = ct->size_limit;
-    if (size_max_ptr)
-        *size_max_ptr = ct->size_max;
     cachetable_unlock(ct);
 }
......
@@ -442,7 +442,7 @@ u_int32_t toku_cachefile_fullhash_of_header (CACHEFILE cachefile);
 void toku_cachetable_print_state (CACHETABLE ct);
 // Get the state of the cachetable. This is used to verify the cachetable
-void toku_cachetable_get_state(CACHETABLE ct, int *num_entries_ptr, int *hash_size_ptr, long *size_current_ptr, long *size_limit_ptr, int64_t *size_max_ptr);
+void toku_cachetable_get_state(CACHETABLE ct, int *num_entries_ptr, int *hash_size_ptr, long *size_current_ptr, long *size_limit_ptr);
 // Get the state of a cachetable entry by key. This is used to verify the cachetable
 int toku_cachetable_get_key_state(CACHETABLE ct, CACHEKEY key, CACHEFILE cf,
@@ -474,7 +474,6 @@ typedef enum {
     CT_PREFETCHES,      // how many times has a block been prefetched into the cachetable?
     CT_SIZE_CURRENT,    // the sum of the sizes of the nodes represented in the cachetable
     CT_SIZE_LIMIT,      // the limit to the sum of the node sizes
-    CT_SIZE_MAX,        // high water mark of size_current (max value size_current ever had)
     CT_SIZE_WRITING,    // the sum of the sizes of the nodes being written
     CT_SIZE_NONLEAF,    // number of bytes in cachetable belonging to nonleaf nodes
     CT_SIZE_LEAF,       // number of bytes in cachetable belonging to leaf nodes
......
@@ -15,8 +15,8 @@ cachetable_debug_test (int n) {
     CACHEFILE f1;
     r = toku_cachetable_openf(&f1, ct, fname1, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IRWXO); assert(r == 0);
-    int num_entries, hash_size; long size_current, size_limit; int64_t size_max;
-    toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit, &size_max);
+    int num_entries, hash_size; long size_current, size_limit;
+    toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit);
     assert(num_entries == 0);
     assert(size_current == 0);
     assert(size_limit == n);
@@ -42,7 +42,7 @@ cachetable_debug_test (int n) {
         r = toku_cachetable_unpin(f1, make_blocknum(i), hi, CACHETABLE_CLEAN, make_pair_attr(1));
         assert(r == 0);
-        toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit, &size_max);
+        toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit);
         assert(num_entries == i);
         assert(size_current == i);
         assert(size_limit == n);
......
@@ -700,11 +700,11 @@ static void test_size_flush(void) {
         r = toku_cachetable_put(f, key, hkey, value, make_pair_attr(size), wc);
         assert(r == 0);
-        int n_entries, hash_size; long size_current, size_limit; int64_t size_max;
-        toku_cachetable_get_state(t, &n_entries, &hash_size, &size_current, &size_limit, &size_max);
+        int n_entries, hash_size; long size_current, size_limit;
+        toku_cachetable_get_state(t, &n_entries, &hash_size, &size_current, &size_limit);
         while (n_entries != min2(i+1, n)) {
             toku_pthread_yield(); maybe_flush(t);
-            toku_cachetable_get_state(t, &n_entries, 0, 0, 0, 0);
+            toku_cachetable_get_state(t, &n_entries, 0, 0, 0);
         }
         assert(n_entries == min2(i+1, n));
......
@@ -78,7 +78,7 @@ cachetable_unpin_and_remove_test (int n) {
     // verify that the cachtable is empty
     int nentries;
-    toku_cachetable_get_state(ct, &nentries, NULL, NULL, NULL, NULL);
+    toku_cachetable_get_state(ct, &nentries, NULL, NULL, NULL);
     assert(nentries == 0);
     char *error_string;
......
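Any caller outside this tree that still uses toku_cachetable_get_state, or that parses engine status for a "size max" row, needs the same treatment as the test updates above: drop the trailing int64_t *size_max_ptr argument and stop expecting CT_SIZE_MAX. A minimal caller sketch under that assumption (the include path, function name, and variable names here are illustrative, not from this commit):

#include <stdio.h>
#include "cachetable.h"   // illustrative include; use whatever header path this tree exports

static void print_cachetable_summary(CACHETABLE ct) {
    int num_entries, hash_size;
    long size_current, size_limit;
    // New 5-argument form; the old int64_t *size_max_ptr out-parameter is gone.
    toku_cachetable_get_state(ct, &num_entries, &hash_size, &size_current, &size_limit);
    printf("entries=%d hash_size=%d size_current=%ld size_limit=%ld\n",
           num_entries, hash_size, size_current, size_limit);
}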