Commit 35db9631 authored by marko's avatar marko

branches/zip: Add statistics on page compression and decompression counts.

ha_innodb.cc: Add the columns COMPRESSED, COMPRESSED_OK, DECOMPRESSED
to INFORMATION_SCHEMA.INNODB_BUDDY.

page_zip_compress_count[], page_zip_compress_ok[]: New statistic counters,
incremented in page_zip_compress().

page_zip_decompress_count[]: New statistic counter,
incremented in page_zip_decompress().
parent 1ac0e7f0
......@@ -8775,14 +8775,29 @@ innobase_is_buddy_fill(
{
TABLE* table = (TABLE *) tables->table;
int status = 0;
uint y = 0;
DBUG_ENTER("innobase_is_buddy_fill");
/* Determine log2(PAGE_ZIP_MIN_SIZE / 2 / BUF_BUDDY_LOW). */
for (uint r = PAGE_ZIP_MIN_SIZE / 2 / BUF_BUDDY_LOW; r >>= 1; y++);
mutex_enter_noninline(&buf_pool->mutex);
for (uint x = 0; x <= BUF_BUDDY_SIZES; x++) {
table->field[0]->store(BUF_BUDDY_LOW << x);
table->field[1]->store(buf_buddy_relocated[x]);
table->field[2]->store(buf_buddy_used[x]);
if (x > y) {
const uint i = x - y;
table->field[2]->store(page_zip_compress_count[i]);
table->field[3]->store(page_zip_compress_ok[i]);
table->field[4]->store(page_zip_decompress_count[i]);
} else {
table->field[2]->store(0);
table->field[3]->store(0);
table->field[4]->store(0);
}
table->field[5]->store(buf_buddy_used[x]);
if (schema_table_store_record(thd, table)) {
status = 1;
......@@ -8799,6 +8814,12 @@ static ST_FIELD_INFO innobase_is_buddy_fields[] =
{
{"SIZE", 5, MYSQL_TYPE_LONG, 0, 0, "Block Size"},
{"RELOCATED", 21, MYSQL_TYPE_LONG, 0, 0, "Total Number of Relocations"},
{"COMPRESSED", 21, MYSQL_TYPE_LONG, 0, 0,
"Total Number of Compressions"},
{"COMPRESSED_OK", 21, MYSQL_TYPE_LONG, 0, 0,
"Total Number of Successful Compressions"},
{"DECOMPRESSED", 21, MYSQL_TYPE_LONG, 0, 0,
"Total Number of Decompressions"},
{"USED", 21, MYSQL_TYPE_LONG, 0, 0, "Currently in Use"},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
};
......
......@@ -42,11 +42,19 @@ struct page_zip_des_struct
columns on the page; the maximum
is 744 on a 16 KiB page */
unsigned ssize:3; /* 0 or compressed page size;
the size in bytes is 512<<ssize. */
the size in bytes is
PAGE_ZIP_MIN_SIZE << (ssize - 1). */
};
#define PAGE_ZIP_MIN_SIZE 1024 /* smallest page_zip_des_struct.size */
/** Number of page compression attempts, indexed by page_zip_des_t::ssize
(a 3-bit field, hence 8 slots; ssize == 0 denotes "no compressed page",
so slot 0 is presumably never incremented — confirm against callers) */
extern ulint page_zip_compress_count[8];
/** Number of successful page compressions, indexed by page_zip_des_t::ssize;
compress_ok[i] <= compress_count[i] for each slot */
extern ulint page_zip_compress_ok[8];
/** Number of page decompressions, indexed by page_zip_des_t::ssize */
extern ulint page_zip_decompress_count[8];
/**************************************************************************
Write data to the compressed page. The data must already be written to
the uncompressed page. */
......
......@@ -25,6 +25,13 @@ Created June 2005 by Marko Makela
#include "log0recv.h"
#include "zlib.h"
/** Number of page compression attempts, indexed by page_zip_des_t::ssize.
Incremented (without synchronization) at the start of page_zip_compress();
declared in page0zip.h */
ulint page_zip_compress_count[8];
/** Number of successful page compressions, indexed by page_zip_des_t::ssize.
Incremented just before page_zip_compress() returns TRUE */
ulint page_zip_compress_ok[8];
/** Number of page decompressions, indexed by page_zip_des_t::ssize.
Incremented just before page_zip_decompress() returns TRUE */
ulint page_zip_decompress_count[8];
/* Please refer to ../include/page0zip.ic for a description of the
compressed page format. */
......@@ -1062,6 +1069,8 @@ page_zip_compress(
n_fields, n_dense);
}
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
page_zip_compress_count[page_zip->ssize]++;
if (UNIV_UNLIKELY(n_dense * PAGE_ZIP_DIR_SLOT_SIZE
>= page_zip_get_size(page_zip))) {
return(FALSE);
......@@ -1224,6 +1233,8 @@ page_zip_compress(
page_zip_compress_write_log(page_zip, page, index, mtr);
}
page_zip_compress_ok[page_zip->ssize]++;
return(TRUE);
}
......@@ -2588,6 +2599,7 @@ page_zip_decompress(
page_zip_fields_free(index);
mem_heap_free(heap);
page_zip_decompress_count[page_zip->ssize]++;
return(TRUE);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment