Commit 67e3d1ee authored by Marko Mäkelä's avatar Marko Mäkelä

Reduce the number of dict_table_page_size() calls

parent f6481457
...@@ -591,7 +591,7 @@ static dberr_t btr_cur_instant_init_low(dict_index_t* index, mtr_t* mtr) ...@@ -591,7 +591,7 @@ static dberr_t btr_cur_instant_init_low(dict_index_t* index, mtr_t* mtr)
} else { } else {
col->def_val.data = btr_copy_externally_stored_field( col->def_val.data = btr_copy_externally_stored_field(
&col->def_val.len, data, &col->def_val.len, data,
dict_table_page_size(index->table), cur.page_cur.block->page.size,
len, index->table->heap); len, index->table->heap);
} }
} }
...@@ -3686,9 +3686,9 @@ btr_cur_pessimistic_insert( ...@@ -3686,9 +3686,9 @@ btr_cur_pessimistic_insert(
index->first_user_field()))); index->first_user_field())));
if (page_zip_rec_needs_ext(rec_get_converted_size(index, entry, n_ext), if (page_zip_rec_needs_ext(rec_get_converted_size(index, entry, n_ext),
dict_table_is_comp(index->table), index->table->not_redundant(),
dtuple_get_n_fields(entry), dtuple_get_n_fields(entry),
dict_table_page_size(index->table)) btr_cur_get_block(cursor)->page.size)
|| UNIV_UNLIKELY(entry->is_alter_metadata())) { || UNIV_UNLIKELY(entry->is_alter_metadata())) {
/* The record is so big that we have to store some fields /* The record is so big that we have to store some fields
externally on separate database pages */ externally on separate database pages */
...@@ -4558,7 +4558,7 @@ btr_cur_optimistic_update( ...@@ -4558,7 +4558,7 @@ btr_cur_optimistic_update(
if (page_zip_rec_needs_ext(new_rec_size, page_is_comp(page), if (page_zip_rec_needs_ext(new_rec_size, page_is_comp(page),
dict_index_get_n_fields(index), dict_index_get_n_fields(index),
dict_table_page_size(index->table))) { block->page.size)) {
goto any_extern; goto any_extern;
} }
...@@ -7526,8 +7526,8 @@ btr_store_big_rec_extern_fields( ...@@ -7526,8 +7526,8 @@ btr_store_big_rec_extern_fields(
ut_ad(buf_block_get_frame(rec_block) == page_align(rec)); ut_ad(buf_block_get_frame(rec_block) == page_align(rec));
ut_a(dict_index_is_clust(index)); ut_a(dict_index_is_clust(index));
ut_a(dict_table_page_size(index->table) ut_ad(dict_table_page_size(index->table)
.equals_to(rec_block->page.size)); .equals_to(rec_block->page.size));
btr_blob_log_check_t redo_log(pcur, btr_mtr, offsets, &rec_block, btr_blob_log_check_t redo_log(pcur, btr_mtr, offsets, &rec_block,
&rec, op); &rec, op);
...@@ -7572,15 +7572,13 @@ btr_store_big_rec_extern_fields( ...@@ -7572,15 +7572,13 @@ btr_store_big_rec_extern_fields(
} }
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */ #endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
const page_size_t page_size(dict_table_page_size(index->table));
/* Space available in compressed page to carry blob data */ /* Space available in compressed page to carry blob data */
const ulint payload_size_zip = page_size.physical() const ulint payload_size_zip = rec_block->page.size.physical()
- FIL_PAGE_DATA; - FIL_PAGE_DATA;
/* Space available in uncompressed page to carry blob data */ /* Space available in uncompressed page to carry blob data */
const ulint payload_size = page_size.physical() const ulint payload_size = payload_size_zip
- FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END; - (BTR_BLOB_HDR_SIZE + FIL_PAGE_DATA_END);
/* We have to create a file segment to the tablespace /* We have to create a file segment to the tablespace
for each field and put the pointer to the field in rec */ for each field and put the pointer to the field in rec */
......
...@@ -657,7 +657,7 @@ dtuple_convert_big_rec( ...@@ -657,7 +657,7 @@ dtuple_convert_big_rec(
while (page_zip_rec_needs_ext(rec_get_converted_size(index, entry, while (page_zip_rec_needs_ext(rec_get_converted_size(index, entry,
*n_ext), *n_ext),
dict_table_is_comp(index->table), index->table->not_redundant(),
dict_index_get_n_fields(index), dict_index_get_n_fields(index),
dict_table_page_size(index->table))) { dict_table_page_size(index->table))) {
longest_i = 0; longest_i = 0;
......
...@@ -3282,7 +3282,7 @@ fts_fetch_doc_from_rec( ...@@ -3282,7 +3282,7 @@ fts_fetch_doc_from_rec(
doc->text.f_str = doc->text.f_str =
btr_rec_copy_externally_stored_field( btr_rec_copy_externally_stored_field(
clust_rec, offsets, clust_rec, offsets,
dict_table_page_size(table), btr_pcur_get_block(pcur)->page.size,
clust_pos, &doc->text.f_len, clust_pos, &doc->text.f_len,
static_cast<mem_heap_t*>( static_cast<mem_heap_t*>(
doc->self_heap->arg)); doc->self_heap->arg));
......
...@@ -746,14 +746,15 @@ rtr_adjust_upper_level( ...@@ -746,14 +746,15 @@ rtr_adjust_upper_level(
prev_page_no = btr_page_get_prev(page, mtr); prev_page_no = btr_page_get_prev(page, mtr);
next_page_no = btr_page_get_next(page, mtr); next_page_no = btr_page_get_next(page, mtr);
space = block->page.id.space(); space = block->page.id.space();
const page_size_t& page_size = dict_table_page_size(index->table); ut_ad(block->page.size.equals_to(dict_table_page_size(index->table)));
/* Update page links of the level */ /* Update page links of the level */
if (prev_page_no != FIL_NULL) { if (prev_page_no != FIL_NULL) {
page_id_t prev_page_id(space, prev_page_no); page_id_t prev_page_id(space, prev_page_no);
buf_block_t* prev_block = btr_block_get( buf_block_t* prev_block = btr_block_get(
prev_page_id, page_size, RW_X_LATCH, index, mtr); prev_page_id, block->page.size, RW_X_LATCH,
index, mtr);
#ifdef UNIV_BTR_DEBUG #ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(prev_block->frame) == page_is_comp(page)); ut_a(page_is_comp(prev_block->frame) == page_is_comp(page));
ut_a(btr_page_get_next(prev_block->frame, mtr) ut_a(btr_page_get_next(prev_block->frame, mtr)
...@@ -769,7 +770,8 @@ rtr_adjust_upper_level( ...@@ -769,7 +770,8 @@ rtr_adjust_upper_level(
page_id_t next_page_id(space, next_page_no); page_id_t next_page_id(space, next_page_no);
buf_block_t* next_block = btr_block_get( buf_block_t* next_block = btr_block_get(
next_page_id, page_size, RW_X_LATCH, index, mtr); next_page_id, block->page.size, RW_X_LATCH,
index, mtr);
#ifdef UNIV_BTR_DEBUG #ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(next_block->frame) == page_is_comp(page)); ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
ut_a(btr_page_get_prev(next_block->frame, mtr) ut_a(btr_page_get_prev(next_block->frame, mtr)
......
...@@ -3326,7 +3326,7 @@ row_sel_get_clust_rec_for_mysql( ...@@ -3326,7 +3326,7 @@ row_sel_get_clust_rec_for_mysql(
and is it not unsafe to use RW_NO_LATCH here? */ and is it not unsafe to use RW_NO_LATCH here? */
buf_block_t* block = buf_page_get_gen( buf_block_t* block = buf_page_get_gen(
btr_pcur_get_block(prebuilt->pcur)->page.id, btr_pcur_get_block(prebuilt->pcur)->page.id,
dict_table_page_size(sec_index->table), btr_pcur_get_block(prebuilt->pcur)->page.size,
RW_NO_LATCH, NULL, BUF_GET, RW_NO_LATCH, NULL, BUF_GET,
__FILE__, __LINE__, mtr, &err); __FILE__, __LINE__, mtr, &err);
mem_heap_t* heap = mem_heap_create(256); mem_heap_t* heap = mem_heap_create(256);
......
...@@ -1335,6 +1335,8 @@ trx_undo_page_report_modify( ...@@ -1335,6 +1335,8 @@ trx_undo_page_report_modify(
table, col); table, col);
ut_a(prefix_len < sizeof ext_buf); ut_a(prefix_len < sizeof ext_buf);
const page_size_t& page_size
= dict_table_page_size(table);
/* If there is a spatial index on it, /* If there is a spatial index on it,
log its MBR */ log its MBR */
...@@ -1343,9 +1345,7 @@ trx_undo_page_report_modify( ...@@ -1343,9 +1345,7 @@ trx_undo_page_report_modify(
col->mtype)); col->mtype));
trx_undo_get_mbr_from_ext( trx_undo_get_mbr_from_ext(
mbr, mbr, page_size,
dict_table_page_size(
table),
field, &flen); field, &flen);
} }
...@@ -1354,7 +1354,7 @@ trx_undo_page_report_modify( ...@@ -1354,7 +1354,7 @@ trx_undo_page_report_modify(
flen < REC_ANTELOPE_MAX_INDEX_COL_LEN flen < REC_ANTELOPE_MAX_INDEX_COL_LEN
&& !ignore_prefix && !ignore_prefix
? ext_buf : NULL, prefix_len, ? ext_buf : NULL, prefix_len,
dict_table_page_size(table), page_size,
&field, &flen, &field, &flen,
spatial_status); spatial_status);
} else { } else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment