Commit 7a8cc852 authored by Marko Mäkelä

MDEV-35049: btr_search_check_free_space_in_heap() is a bottleneck

MEM_HEAP_BTR_SEARCH: Remove. Let us handle this special type of
mem_heap_t allocations in the only compilation unit, btr0sea.cc.

mem_block_info_t::ahi_block: Replaces free_block. This caches one
buffer page for use in adaptive hash index allocations. This is
protected by btr_search_sys_t::partition::latch. It is Atomic_relaxed
only because btr_search_free_space() follows a test, lock, and
re-test pattern.

btr_search_check_free_space(): Protect the ahi_block with a
shared AHI partition latch. We must recheck btr_search_enabled after
acquiring the latch in order to avoid a race condition with
btr_search_disable(). Using a shared latch instead of an exclusive one
should reduce contention with btr_search_guess_on_hash() and other
operations when running with innodb_adaptive_hash_index=ON.

This has been tested by running the regression test suite
with the adaptive hash index enabled:
./mtr --mysqld=--loose-innodb-adaptive-hash-index=ON
parent cc70ca7e
This diff is collapsed.
......@@ -30,7 +30,7 @@ Created 2/17/1996 Heikki Tuuri
#include "dict0dict.h"
#ifdef BTR_CUR_HASH_ADAPT
#include "ha0ha.h"
#include "srw_lock.h"
#include "buf0buf.h"
#ifdef UNIV_PFS_RWLOCK
extern mysql_pfs_key_t btr_search_latch_key;
......@@ -240,69 +240,47 @@ struct btr_search_sys_t
/** Partition of the hash table */
struct partition
{
/** latches protecting hash_table */
srw_spin_lock latch;
/** latches protecting the hash table */
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
/** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
hash_table_t table;
/** memory heap for table */
mem_heap_t *heap;
#ifdef _MSC_VER
#pragma warning(push)
// nonstandard extension - zero sized array, if perfschema is not compiled
#pragma warning(disable : 4200)
#endif
inline void init() noexcept;
char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof latch -
sizeof table - sizeof heap) &
(CPU_LEVEL1_DCACHE_LINESIZE - 1)];
inline void alloc(ulint hash_size) noexcept;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
inline void clear() noexcept;
inline void free() noexcept;
void init()
{
memset((void*) this, 0, sizeof *this);
latch.SRW_LOCK_INIT(btr_search_latch_key);
}
void alloc(ulint hash_size)
{
table.create(hash_size);
heap= mem_heap_create_typed(std::min<ulong>(4096,
MEM_MAX_ALLOC_IN_BUF / 2
- MEM_BLOCK_HEADER_SIZE
- MEM_SPACE_NEEDED(0)),
MEM_HEAP_FOR_BTR_SEARCH);
}
void clear()
{
mem_heap_free(heap);
heap= nullptr;
ut_free(table.array);
}
void free()
{
latch.destroy();
if (heap)
clear();
}
__attribute__((nonnull))
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Insert or replace an entry into the hash table.
@param fold hash value of data
@param rec B-tree leaf page record
@param block the buffer block that contains rec */
void insert(ulint fold, const rec_t *rec, buf_block_t *block) noexcept;
#else
/** Insert or replace an entry into the hash table.
@param fold hash value of data
@param rec B-tree leaf page record */
void insert(ulint fold, const rec_t *rec) noexcept;
#endif
};
/** Partitions of the adaptive hash index */
partition *parts;
/** Get an adaptive hash index partition */
partition *get_part(index_id_t id, ulint space_id) const
partition *get_part(index_id_t id, ulint space_id) const noexcept
{
return parts + ut_fold_ulint_pair(ulint(id), space_id) % btr_ahi_parts;
}
/** Get an adaptive hash index partition */
partition *get_part(const dict_index_t &index) const
partition *get_part(const dict_index_t &index) const noexcept
{
ut_ad(!index.table->space ||
index.table->space->id == index.table->space_id);
......@@ -314,37 +292,15 @@ struct btr_search_sys_t
{ return &get_part(index)->latch; }
/** Create and initialize at startup */
void create()
{
parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
mem_key_ahi));
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].init();
if (btr_search_enabled)
btr_search_enable();
}
void create() noexcept;
void alloc(ulint hash_size)
{
hash_size/= btr_ahi_parts;
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].alloc(hash_size);
}
void alloc(ulint hash_size) noexcept;
/** Clear when disabling the adaptive hash index */
void clear() { for (ulong i= 0; i < btr_ahi_parts; ++i) parts[i].clear(); }
inline void clear() noexcept;
/** Free at shutdown */
void free()
{
if (parts)
{
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].free();
ut_free(parts);
parts= nullptr;
}
}
void free() noexcept;
};
/** The adaptive hash index */
......
......@@ -950,7 +950,7 @@ struct buf_block_t{
Another exception is that ha_insert_for_fold() may
decrement n_pointers without holding the appropriate latch
in btr_search_latches[]. Thus, n_pointers must be
in btr_search_sys.parts[]. Thus, n_pointers must be
protected by atomic memory access.
This implies that the fields may be read without race
......
......@@ -39,38 +39,6 @@ ha_node_get_data(
return(node->data);
}
/******************************************************************//**
Sets hash node data. Stores the B-tree record pointer in an adaptive
hash index chain node; in debug builds (UNIV_AHI_DEBUG or UNIV_DEBUG)
the buffer block containing the record is recorded as well, so that
debug checks can validate the node against its block. */
UNIV_INLINE
void
ha_node_set_data_func(
/*==================*/
ha_node_t* node, /*!< in/out: hash chain node to update */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data) /*!< in: pointer to the data (record) */
{
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/* Debug-only bookkeeping: remember which buffer block
holds the record that this node points to. */
node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data = data;
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,b,d)
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,d)
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/******************************************************************//**
Gets the next node in a hash chain.
@return next node, NULL if none */
......
......@@ -28,8 +28,6 @@ Created 6/9/1994 Heikki Tuuri
#define mem0mem_h
#include "ut0mem.h"
#include "ut0rnd.h"
#include "mach0data.h"
#include <memory>
......@@ -42,22 +40,14 @@ typedef struct mem_block_info_t mem_block_t;
/** A memory heap is a nonempty linear list of memory blocks */
typedef mem_block_t mem_heap_t;
struct buf_block_t;
/** Types of allocation for memory heaps: DYNAMIC means allocation from the
dynamic memory pool of the C compiler, BUFFER means allocation from the
buffer pool; the latter method is used for very big heaps */
#define MEM_HEAP_DYNAMIC 0 /* the most common type */
#define MEM_HEAP_BUFFER 1
#define MEM_HEAP_BTR_SEARCH 2 /* this flag can optionally be
ORed to MEM_HEAP_BUFFER, in which
case heap->free_block is used in
some cases for memory allocations,
and if it's NULL, the memory
allocation functions can return
NULL. */
/** Different type of heaps in terms of which datastructure is using them */
#define MEM_HEAP_FOR_BTR_SEARCH (MEM_HEAP_BTR_SEARCH | MEM_HEAP_BUFFER)
#define MEM_HEAP_FOR_LOCK_HEAP (MEM_HEAP_BUFFER)
/** The following start size is used for the first block in the memory heap if
......@@ -110,8 +100,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
......@@ -145,8 +134,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
......@@ -319,19 +307,17 @@ struct mem_block_info_t {
in the heap. This is defined only in the base
node and is set to ULINT_UNDEFINED in others. */
ulint type; /*!< type of heap: MEM_HEAP_DYNAMIC, or
MEM_HEAP_BUF possibly ORed to MEM_HEAP_BTR_SEARCH */
MEM_HEAP_BUFFER */
ulint free; /*!< offset in bytes of the first free position for
user data in the block */
ulint start; /*!< the value of the struct field 'free' at the
creation of the block */
void* free_block;
/* if the MEM_HEAP_BTR_SEARCH bit is set in type,
and this is the heap root, this can contain an
allocated buffer frame, which can be appended as a
free block to the heap, if we need more space;
otherwise, this is NULL */
void* buf_block;
#ifdef BTR_CUR_HASH_ADAPT
/** a cached block in the heap root */
Atomic_relaxed<buf_block_t*> ahi_block;
#endif
buf_block_t* buf_block;
/* if this block has been allocated from the buffer
pool, this contains the buf_block_t handle;
otherwise, this is NULL */
......
......@@ -39,8 +39,7 @@ Created 6/8/1994 Heikki Tuuri
#endif /* UNIV_DEBUG */
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
......@@ -62,19 +61,11 @@ mem_heap_block_free(
mem_heap_t* heap, /*!< in: heap */
mem_block_t* block); /*!< in: block to free */
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap); /*!< in: heap */
/***************************************************************//**
Adds a new block to a memory heap.
@param[in] heap memory heap
@param[in] n number of bytes needed
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
mem_heap_t* heap,
......@@ -100,9 +91,7 @@ UNIV_INLINE
void
mem_block_set_type(mem_block_t* block, ulint type)
{
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
block->type = type;
}
......@@ -157,8 +146,6 @@ mem_heap_zalloc(
mem_heap_t* heap,
ulint n)
{
ut_ad(heap);
ut_ad(!(heap->type & MEM_HEAP_BTR_SEARCH));
return(memset(mem_heap_alloc(heap, n), 0, n));
}
......@@ -166,8 +153,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
......@@ -289,11 +275,10 @@ void
mem_heap_empty(
mem_heap_t* heap)
{
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!heap->ahi_block);
#endif
mem_heap_free_heap_top(heap, (byte*) heap + mem_block_get_start(heap));
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
}
/** Returns a pointer to the topmost element in a memory heap.
......@@ -356,8 +341,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
......@@ -401,15 +385,15 @@ void
mem_heap_free(
mem_heap_t* heap)
{
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!heap->ahi_block);
#endif
mem_block_t* block;
mem_block_t* prev_block;
block = UT_LIST_GET_LAST(heap->base);
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
while (block != NULL) {
/* Store the contents of info before freeing current block
(it is erased in freeing) */
......@@ -430,13 +414,10 @@ mem_heap_get_size(
/*==============*/
mem_heap_t* heap) /*!< in: heap */
{
ulint size = heap->total_size;
if (heap->free_block) {
size += srv_page_size;
}
return(size);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!heap->ahi_block);
#endif
return heap->total_size;
}
/**********************************************************************//**
......
......@@ -214,7 +214,6 @@ mem_heap_validate(
case MEM_HEAP_DYNAMIC:
break;
case MEM_HEAP_BUFFER:
case MEM_HEAP_BUFFER | MEM_HEAP_BTR_SEARCH:
ut_ad(block->len <= srv_page_size);
break;
default:
......@@ -241,8 +240,7 @@ static void ut_strlcpy_rev(char* dst, const char* src, ulint size)
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
......@@ -256,12 +254,11 @@ mem_heap_create_block_func(
ulint type) /*!< in: type of heap: MEM_HEAP_DYNAMIC or
MEM_HEAP_BUFFER */
{
buf_block_t* buf_block = NULL;
buf_block_t* buf_block;
mem_block_t* block;
ulint len;
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
if (heap != NULL) {
ut_d(mem_heap_validate(heap));
......@@ -275,24 +272,11 @@ mem_heap_create_block_func(
ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF);
block = static_cast<mem_block_t*>(ut_malloc_nokey(len));
buf_block = nullptr;
} else {
len = srv_page_size;
if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
/* We cannot allocate the block from the
buffer pool, but must get the free block from
the heap header free block field */
buf_block = static_cast<buf_block_t*>(heap->free_block);
heap->free_block = NULL;
if (UNIV_UNLIKELY(!buf_block)) {
return(NULL);
}
} else {
buf_block = buf_block_alloc();
}
buf_block = buf_block_alloc();
block = (mem_block_t*) buf_block->page.frame;
}
......@@ -303,7 +287,9 @@ mem_heap_create_block_func(
}
block->buf_block = buf_block;
block->free_block = NULL;
#ifdef BTR_CUR_HASH_ADAPT
block->ahi_block = nullptr;
#endif
ut_d(ut_strlcpy_rev(block->file_name, file_name,
sizeof(block->file_name)));
......@@ -339,8 +325,7 @@ mem_heap_create_block_func(
/***************************************************************//**
Adds a new block to a memory heap.
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
/*===============*/
......@@ -399,9 +384,6 @@ mem_heap_block_free(
{
ulint type;
ulint len;
buf_block_t* buf_block;
buf_block = static_cast<buf_block_t*>(block->buf_block);
UT_LIST_REMOVE(heap->base, block);
......@@ -412,25 +394,10 @@ mem_heap_block_free(
len = block->len;
if (type == MEM_HEAP_DYNAMIC || len < srv_page_size / 2) {
ut_ad(!buf_block);
ut_ad(!block->buf_block);
ut_free(block);
} else {
ut_ad(type & MEM_HEAP_BUFFER);
buf_block_free(buf_block);
}
}
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap) /*!< in: heap */
{
if (UNIV_LIKELY_NULL(heap->free_block)) {
buf_block_free(static_cast<buf_block_t*>(heap->free_block));
heap->free_block = NULL;
buf_block_free(block->buf_block);
}
}
......@@ -811,11 +811,10 @@ srv_printf_innodb_monitor(
for (ulint i = 0; i < btr_ahi_parts && btr_search_enabled; ++i) {
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
fprintf(file, "Hash table size " ULINTPF
", node heap has " ULINTPF " buffer(s)\n",
part->table.n_cells,
part->heap->base.count - !part->heap->free_block);
part->heap->base.count - !part->heap->ahi_block);
part->latch.rd_unlock();
}
......@@ -949,10 +948,11 @@ srv_export_innodb_status(void)
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
if (part->heap) {
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
mem_adaptive_hash += mem_heap_get_size(part->heap)
+ part->table.n_cells * sizeof(hash_cell_t);
ut_ad(part->heap->type == MEM_HEAP_BUFFER);
mem_adaptive_hash += part->heap->total_size
+ !!part->heap->ahi_block * srv_page_size
+ part->table.n_cells
* sizeof *part->table.array;
}
part->latch.rd_unlock();
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment