Commit 2561797b authored by marko

branches/zip: On compressed pages, always update the insert buffer bitmap.

Do not assume anything about the contents of the bitmap.

ibuf_update_free_bits_low(): Use this function only for uncompressed pages.
Remove the parameter zip_size.  This function avoids latching the bitmap page
and updating the bitmap when the bits do not change.
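For illustration, the repaired fast path condenses to the following sketch,
assembled from the patch below (the surrounding declarations and mtr context
are assumed):

	/* Uncompressed pages only: recompute the free bits and latch
	the bitmap page only when the value actually changes. */
	ulint	before;
	ulint	after;

	ut_a(!buf_block_get_page_zip(block));

	before = ibuf_index_page_calc_free_bits(0, max_ins_size);
	after = ibuf_index_page_calc_free(0, block);

	if (before != after) {
		ibuf_set_free_bits_low(0, block, after, mtr);
	}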

ibuf_update_free_bits_zip(): New function based on ibuf_update_free_bits_low(),
for use on compressed pages.  Remove the parameter max_insert_size that
was used for computing the before image of the free bits.  Always update the
bitmap.
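The compressed-page path, condensed from the new function in the patch below,
latches the bitmap page and writes the bits unconditionally (a sketch; the
mtr context is assumed):

	/* Compressed pages: the "before" value cannot be derived
	reliably, so always write the current free bits. */
	ulint	space	 = buf_block_get_space(block);
	ulint	page_no	 = buf_block_get_page_no(block);
	ulint	zip_size = buf_block_get_zip_size(block);
	page_t*	bitmap_page;
	ulint	after;

	ut_a(zip_size);

	bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr);
	after = ibuf_index_page_calc_free_zip(zip_size, block);
	ibuf_bitmap_page_set_bits(bitmap_page, page_no, zip_size,
				  IBUF_BITMAP_FREE, after, mtr);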

ibuf_index_page_calc_free_zip(): New function, factored out from
ibuf_index_page_calc_free().
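The factored-out function bounds the free space by both the uncompressed and
the compressed maximum insert size; in sketch form (a negative return value
from page_zip_max_ins_size() means the compressed page has no room):

	max_ins_size = page_get_max_insert_size_after_reorganize(
		buf_block_get_frame(block), 1);
	zip_max_ins = page_zip_max_ins_size(page_zip,
					    FALSE/* not clustered */);
	if (zip_max_ins < 0) {
		return(0);	/* no room on the compressed page */
	} else if (max_ins_size > (ulint) zip_max_ins) {
		max_ins_size = (ulint) zip_max_ins;	/* take the minimum */
	}

	return(ibuf_index_page_calc_free_bits(zip_size, max_ins_size));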

ibuf_update_free_bits_if_full(): Document that this function must only be
invoked on uncompressed pages.  Add a debug assertion about this.
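Callers now dispatch on the page type themselves, as in this condensed
excerpt from btr_cur_optimistic_delete() below:

	if (dict_index_is_clust(cursor->index) || !page_is_leaf(page)) {
		/* The insert buffer covers only leaf pages
		of secondary indexes; nothing to do. */
	} else if (page_zip) {
		ibuf_update_free_bits_zip(block, mtr);
	} else {
		ibuf_update_free_bits_low(block, max_ins, mtr);
	}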
parent 5ccc74c5
@@ -1014,7 +1014,6 @@ btr_cur_optimistic_insert(
 	buf_block_t*	block;
 	page_t*		page;
 	ulint		max_size;
-	ulint		max_size_zip	= 0;
 	rec_t*		dummy_rec;
 	ibool		leaf;
 	ibool		reorg;
@@ -1046,19 +1045,6 @@
 	max_size = page_get_max_insert_size_after_reorganize(page, 1);
 	leaf = page_is_leaf(page);
-	/* If necessary for updating the insert buffer bitmap,
-	calculate the current maximum insert size on a compressed page. */
-	if (zip_size && UNIV_LIKELY(leaf) && !dict_index_is_clust(index)) {
-		const page_zip_des_t*	page_zip
-			= buf_block_get_page_zip(block);
-		lint	zip_max
-			= page_zip_max_ins_size(page_zip, FALSE);
-		if (zip_max >= 0 && max_size > (ulint) zip_max) {
-			max_size_zip = (ulint) zip_max;
-		}
-	}
 	/* Calculate the record size when entry is converted to a record */
 	rec_size = rec_get_converted_size(index, entry, n_ext);
@@ -1206,8 +1192,7 @@ fail_err:
 		if (zip_size) {
 			/* Update the bits in the same mini-transaction. */
-			ibuf_update_free_bits_low(zip_size, block,
-						  max_size_zip, mtr);
+			ibuf_update_free_bits_zip(block, mtr);
 		} else {
 			/* Decrement the bits in a separate
 			mini-transaction. */
@@ -1752,8 +1737,7 @@ btr_cur_update_in_place(
 	if (page_zip && !dict_index_is_clust(index)
 	    && page_is_leaf(buf_block_get_frame(block))) {
 		/* Update the free bits in the insert buffer. */
-		ibuf_update_free_bits_low(buf_block_get_zip_size(block),
-					  block, UNIV_PAGE_SIZE, mtr);
+		ibuf_update_free_bits_zip(block, mtr);
 	}
 	btr_cur_update_in_place_log(flags, rec, index, update,
@@ -1956,8 +1940,7 @@ err_exit:
 	if (page_zip && !dict_index_is_clust(index)
 	    && page_is_leaf(page)) {
 		/* Update the free bits in the insert buffer. */
-		ibuf_update_free_bits_low(buf_block_get_zip_size(block), block,
-					  UNIV_PAGE_SIZE, mtr);
+		ibuf_update_free_bits_zip(block, mtr);
 	}
 	if (!rec_get_deleted_flag(rec, page_is_comp(page))) {
@@ -2224,9 +2207,7 @@ btr_cur_pessimistic_update(
 	if (page_zip && !dict_index_is_clust(index)
 	    && page_is_leaf(page)) {
 		/* Update the free bits in the insert buffer. */
-		ibuf_update_free_bits_low(
-			buf_block_get_zip_size(block), block,
-			UNIV_PAGE_SIZE, mtr);
+		ibuf_update_free_bits_zip(block, mtr);
 	}
 	err = DB_SUCCESS;
@@ -2708,7 +2689,6 @@ btr_cur_optimistic_delete(
 	mtr_t*		mtr)	/* in: mtr */
 {
 	buf_block_t*	block;
-	ulint		max_ins_size;
 	rec_t*		rec;
 	mem_heap_t*	heap		= NULL;
 	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
@@ -2736,24 +2716,15 @@
 		page_t*		page = buf_block_get_frame(block);
 		page_zip_des_t*	page_zip= buf_block_get_page_zip(block);
-		ulint		zip_size= buf_block_get_zip_size(block);
+		ulint		max_ins	= 0;
 		lock_update_delete(block, rec);
 		btr_search_update_hash_on_delete(cursor);
-		max_ins_size = page_get_max_insert_size_after_reorganize(
-			page, 1);
-		if (zip_size) {
-			lint	zip_max_ins = page_zip_max_ins_size(
-				page_zip, FALSE/* not clustered */);
-			if (UNIV_UNLIKELY(zip_max_ins < 0)) {
-				max_ins_size = 0;
-			} else if (UNIV_LIKELY
-				   (max_ins_size > (ulint) zip_max_ins)) {
-				max_ins_size = (ulint) zip_max_ins;
-			}
+		if (!page_zip) {
+			max_ins = page_get_max_insert_size_after_reorganize(
+				page, 1);
 		}
 #ifdef UNIV_ZIP_DEBUG
 		ut_a(!page_zip || page_zip_validate(page_zip, page));
@@ -2764,10 +2735,15 @@
 		ut_a(!page_zip || page_zip_validate(page_zip, page));
 #endif /* UNIV_ZIP_DEBUG */
-		if (!dict_index_is_clust(cursor->index)
-		    && page_is_leaf(page)) {
-			ibuf_update_free_bits_low(zip_size, block,
-						  max_ins_size, mtr);
+		if (dict_index_is_clust(cursor->index)
+		    || !page_is_leaf(page)) {
+			/* The insert buffer does not handle
+			inserts to clustered indexes or to non-leaf
+			pages of secondary index B-trees. */
+		} else if (page_zip) {
+			ibuf_update_free_bits_zip(block, mtr);
+		} else {
+			ibuf_update_free_bits_low(block, max_ins, mtr);
 		}
 	}
......
@@ -790,13 +790,13 @@ UNIV_INLINE
 void
 ibuf_set_free_bits_low(
 /*===================*/
-	ulint		zip_size,/* in: compressed page size in bytes;
-				0 for uncompressed pages */
-	buf_block_t*	block,	/* in: index page; free bits are set if
-				the index is non-clustered and page
-				level is 0 */
-	ulint		val,	/* in: value to set: < 4 */
-	mtr_t*		mtr)	/* in: mtr */
+	ulint			zip_size,/* in: compressed page size in bytes;
+					0 for uncompressed pages */
+	const buf_block_t*	block,	/* in: index page; free bits are set if
+					the index is non-clustered and page
+					level is 0 */
+	ulint			val,	/* in: value to set: < 4 */
+	mtr_t*			mtr)	/* in/out: mtr */
 {
 	page_t*	bitmap_page;
 	ulint	space;
@@ -911,33 +911,70 @@ ibuf_reset_free_bits(
 }
 /**************************************************************************
-Updates the free bits for a page to reflect the present state. Does this
-in the mtr given, which means that the latching order rules virtually prevent
-any further operations for this OS thread until mtr is committed. */
+Updates the free bits for an uncompressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
+prevent any further operations for this OS thread until mtr is committed. */
 void
 ibuf_update_free_bits_low(
 /*======================*/
-	ulint		zip_size,	/* in: compressed page size in bytes;
-					0 for uncompressed pages */
-	buf_block_t*	block,		/* in: index page */
-	ulint		max_ins_size,	/* in: value of maximum insert size
-					with reorganize before the latest
-					operation performed to the page */
-	mtr_t*		mtr)		/* in: mtr */
+	const buf_block_t*	block,		/* in: index page */
+	ulint			max_ins_size,	/* in: value of
+						maximum insert size
+						with reorganize before
+						the latest operation
+						performed to the page */
+	mtr_t*			mtr)		/* in/out: mtr */
 {
 	ulint	before;
 	ulint	after;
-	before = ibuf_index_page_calc_free_bits(zip_size, max_ins_size);
+	ut_a(!buf_block_get_page_zip(block));
+	before = ibuf_index_page_calc_free_bits(0, max_ins_size);
-	after = ibuf_index_page_calc_free(zip_size, block);
+	after = ibuf_index_page_calc_free(0, block);
+	/* This approach cannot be used on compressed pages, since the
+	computed value of "before" often does not match the current
+	state of the bitmap. This is because the free space may
+	increase or decrease when a compressed page is reorganized. */
 	if (before != after) {
-		ibuf_set_free_bits_low(zip_size, block, after, mtr);
+		ibuf_set_free_bits_low(0, block, after, mtr);
 	}
 }
+/**************************************************************************
+Updates the free bits for a compressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
+prevent any further operations for this OS thread until mtr is committed. */
+void
+ibuf_update_free_bits_zip(
+/*======================*/
+	const buf_block_t*	block,	/* in: index page */
+	mtr_t*			mtr)	/* in/out: mtr */
+{
+	page_t*	bitmap_page;
+	ulint	space;
+	ulint	page_no;
+	ulint	zip_size;
+	ulint	after;
+	space = buf_block_get_space(block);
+	page_no = buf_block_get_page_no(block);
+	zip_size = buf_block_get_zip_size(block);
+	ut_a(page_is_leaf(buf_block_get_frame(block)));
+	ut_a(zip_size);
+	bitmap_page = ibuf_bitmap_get_map_page(space, page_no, zip_size, mtr);
+	after = ibuf_index_page_calc_free_zip(zip_size, block);
+	ibuf_bitmap_page_set_bits(bitmap_page, page_no, zip_size,
+				  IBUF_BITMAP_FREE, after, mtr);
+}
 /**************************************************************************
 Updates the free bits for the two pages to reflect the present state. Does
 this in the mtr given, which means that the latching order rules virtually
......
@@ -67,10 +67,11 @@ ibuf_reset_free_bits(
 				if the index is a non-clustered
 				non-unique, and page level is 0 */
 /****************************************************************************
-Updates the free bits of the page in the ibuf bitmap if there is not enough
-free on the page any more. This is done in a separate mini-transaction, hence
-this operation does not restrict further work to only ibuf bitmap operations,
-which would result if the latch to the bitmap page were kept. */
+Updates the free bits of an uncompressed page in the ibuf bitmap if
+there is not enough free on the page any more. This is done in a
+separate mini-transaction, hence this operation does not restrict
+further work to only ibuf bitmap operations, which would result if the
+latch to the bitmap page were kept. */
 UNIV_INLINE
 void
 ibuf_update_free_bits_if_full(
@@ -87,20 +88,30 @@ ibuf_update_free_bits_if_full(
 				used in the latest operation, if known, or
 				ULINT_UNDEFINED */
 /**************************************************************************
-Updates the free bits for the page to reflect the present state. Does this
-in the mtr given, which means that the latching order rules virtually
+Updates the free bits for an uncompressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
 prevent any further operations for this OS thread until mtr is committed. */
 void
 ibuf_update_free_bits_low(
 /*======================*/
-	ulint		zip_size,	/* in: compressed page size in bytes;
-					0 for uncompressed pages */
-	buf_block_t*	block,		/* in: index page */
-	ulint		max_ins_size,	/* in: value of maximum insert size
-					with reorganize before the latest
-					operation performed to the page */
-	mtr_t*		mtr);		/* in: mtr */
+	const buf_block_t*	block,		/* in: index page */
+	ulint			max_ins_size,	/* in: value of
+						maximum insert size
+						with reorganize before
+						the latest operation
+						performed to the page */
+	mtr_t*			mtr);		/* in/out: mtr */
+/**************************************************************************
+Updates the free bits for a compressed page to reflect the present state.
+Does this in the mtr given, which means that the latching order rules virtually
+prevent any further operations for this OS thread until mtr is committed. */
+void
+ibuf_update_free_bits_zip(
+/*======================*/
+	const buf_block_t*	block,	/* in: index page */
+	mtr_t*			mtr);	/* in/out: mtr */
 /**************************************************************************
 Updates the free bits for the two pages to reflect the present state. Does
 this in the mtr given, which means that the latching order rules virtually
......
@@ -192,6 +192,40 @@ ibuf_index_page_calc_free_from_bits(
 	return(bits * (UNIV_PAGE_SIZE / IBUF_PAGE_SIZE_PER_FREE_SPACE));
 }
+/*************************************************************************
+Translates the free space on a compressed page to a value in the ibuf bitmap.*/
+UNIV_INLINE
+ulint
+ibuf_index_page_calc_free_zip(
+/*==========================*/
+				/* out: value for ibuf bitmap bits */
+	ulint			zip_size,
+				/* in: compressed page size in bytes */
+	const buf_block_t*	block)	/* in: buffer block */
+{
+	ulint			max_ins_size;
+	const page_zip_des_t*	page_zip;
+	lint			zip_max_ins;
+	ut_ad(zip_size == buf_block_get_zip_size(block));
+	ut_ad(zip_size);
+	max_ins_size = page_get_max_insert_size_after_reorganize(
+		buf_block_get_frame(block), 1);
+	page_zip = buf_block_get_page_zip(block);
+	zip_max_ins = page_zip_max_ins_size(page_zip,
+					    FALSE/* not clustered */);
+	if (UNIV_UNLIKELY(zip_max_ins < 0)) {
+		return(0);
+	} else if (UNIV_LIKELY(max_ins_size > (ulint) zip_max_ins)) {
+		max_ins_size = (ulint) zip_max_ins;
+	}
+	return(ibuf_index_page_calc_free_bits(zip_size, max_ins_size));
+}
 /*************************************************************************
 Translates the free space on a page to a value in the ibuf bitmap.*/
 UNIV_INLINE
@@ -203,38 +237,26 @@ ibuf_index_page_calc_free(
 				0 for uncompressed pages */
 	const buf_block_t*	block)	/* in: buffer block */
 {
-	ulint		max_ins_size;
 	ut_ad(zip_size == buf_block_get_zip_size(block));
-	max_ins_size = page_get_max_insert_size_after_reorganize(
-		buf_block_get_frame(block), 1);
 	if (!zip_size) {
-		return(ibuf_index_page_calc_free_bits(0, max_ins_size));
-	} else {
-		const page_zip_des_t*	page_zip;
-		lint			zip_max_ins;
+		ulint	max_ins_size;
-		page_zip = buf_block_get_page_zip(block);
-		zip_max_ins = page_zip_max_ins_size(page_zip,
-						    FALSE/* not clustered */);
-		if (UNIV_UNLIKELY(zip_max_ins < 0)) {
-			max_ins_size = 0;
-		} else if (UNIV_LIKELY(max_ins_size > (ulint) zip_max_ins)) {
-			max_ins_size = (ulint) zip_max_ins;
-		}
+		max_ins_size = page_get_max_insert_size_after_reorganize(
+			buf_block_get_frame(block), 1);
-		return(ibuf_index_page_calc_free_bits(zip_size, max_ins_size));
+		return(ibuf_index_page_calc_free_bits(0, max_ins_size));
+	} else {
+		return(ibuf_index_page_calc_free_zip(zip_size, block));
 	}
 }
 /****************************************************************************
-Updates the free bits of the page in the ibuf bitmap if there is not enough
-free on the page any more. This is done in a separate mini-transaction, hence
-this operation does not restrict further work to only ibuf bitmap operations,
-which would result if the latch to the bitmap page were kept. */
+Updates the free bits of an uncompressed page in the ibuf bitmap if
+there is not enough free on the page any more. This is done in a
+separate mini-transaction, hence this operation does not restrict
+further work to only ibuf bitmap operations, which would result if the
+latch to the bitmap page were kept. */
 UNIV_INLINE
 void
 ibuf_update_free_bits_if_full(
@@ -254,6 +276,8 @@ ibuf_update_free_bits_if_full(
 	ulint	before;
 	ulint	after;
+	ut_ad(!buf_block_get_page_zip(block));
 	before = ibuf_index_page_calc_free_bits(0, max_ins_size);
 	if (max_ins_size >= increase) {
......