Commit 9159b897 authored by Marko Mäkelä

MDEV-22871: Clean up hash_table_t

HASH_TABLE_SYNC_MUTEX was kind-of used for the adaptive hash index,
even though that hash table is already protected by btr_search_latches[].

HASH_TABLE_SYNC_RWLOCK was only being used for buf_pool.page_hash.
It is cleaner to decouple that synchronization from hash_table_t,
and move it to the actual user.

buf_pool_t::page_hash_latches[]: Synchronization for buf_pool.page_hash.
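
(Illustration only, not part of the patch: how a fold value now maps to
one of these latches. The mapping is copied from the new
buf_pool_t::hash_lock_get_low() in this commit; latch_for() is a
hypothetical free-function wrapper added here for readability.)

    /* Pick the rw-lock covering the page_hash cell of "fold".
    srv_n_page_hash_locks is a power of 2, so ut_2pow_remainder()
    is a cheap modulus. */
    rw_lock_t *latch_for(ulint fold)
    {
      return buf_pool.page_hash_latches +
             ut_2pow_remainder(buf_pool.page_hash->calc_hash(fold),
                               ulint{srv_n_page_hash_locks});
    }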

LATCH_ID_HASH_TABLE_MUTEX: Remove.

hash_table_t::sync_obj, hash_table_t::n_sync_obj: Remove.

hash_table_t::type, hash_table_sync_t: Remove.

HASH_ASSERT_OWN(), hash_get_mutex(), hash_get_nth_mutex(): Remove.

ib_recreate(): Merge to the only caller, buf_pool_resize_hash().

ib_create(): Merge to the callers.

ha_clear(): Merge to the only caller buf_pool_t::close().

buf_pool_t::create(): Merge the ib_create() and
hash_create_sync_obj() invocations.

ha_insert_for_fold_func(): Clarify an assertion.

buf_pool_t::page_hash_lock(): Simplify the logic.
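
(Illustration only: the simplified logic is an optimistic
latch-and-revalidate loop. This sketch condenses the new
page_hash_lock() from the diff below into exclusive mode only, ignoring
access control; page_hash_x_lock() is a hypothetical name. It relies on
resize() holding all page_hash_latches[] exclusively while it changes
page_hash->n_cells, so an unchanged n_cells after acquisition proves
that the latch we picked is still the right one.)

    rw_lock_t *page_hash_x_lock(ulint fold)
    {
      for (;;)
      {
        /* sample the array size, then take the latch it implies */
        auto n_cells= buf_pool.page_hash->n_cells;
        rw_lock_t *latch= buf_pool.hash_lock_get_low(fold, n_cells);
        rw_lock_x_lock(latch);
        if (UNIV_LIKELY(n_cells == buf_pool.page_hash->n_cells))
          return latch;            /* no resize() intervened */
        rw_lock_x_unlock(latch);   /* raced with resize(); retry */
      }
    }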

hash_assert_can_search(), hash_assert_can_modify(): Remove.
These predicates were only being invoked for the adaptive hash index,
while they are only effective for buf_pool.page_hash.

HASH_DELETE_AND_COMPACT(): Merge to ha_delete_hash_node().
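
(Illustration only: the essence of the former macro, which
ha_delete_hash_node() now spells out; see the hunk below. relink() is a
hypothetical helper standing in for the cell-head/predecessor pointer
fix-up written out in the real code.)

    /* Nodes are carved from table->heap in allocation order and only
    the topmost allocation can be freed, so after unlinking del_node
    the top node is moved into the vacated slot to keep storage dense. */
    HASH_DELETE(ha_node_t, next, table, del_node->fold, del_node);
    ha_node_t *top= static_cast<ha_node_t*>(
      mem_heap_get_top(table->heap, sizeof(ha_node_t)));
    if (del_node != top)
    {
      *del_node= *top;              /* fill the hole with the top node */
      relink(table, top, del_node); /* hypothetical: repoint the one
                                       pointer (cell head or predecessor)
                                       that referenced top */
    }
    mem_heap_free_top(table->heap, sizeof(ha_node_t));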

hash_get_sync_obj_index(): Remove.

hash_table_t::heaps[], hash_get_nth_heap(): Remove. It was actually unused!

hash_get_heap(): Remove. It was only used in ha_delete_hash_node(),
where we always use hash_table_t::heap.

hash_table_t::calc_hash(): Replaces hash_calc_hash().
parent 08f6513c
@@ -387,9 +387,13 @@ void btr_search_enable(bool resize)
 		ut_malloc(sizeof(hash_table_t*) * btr_ahi_parts, mem_key_ahi));
 	for (ulint i = 0; i < btr_ahi_parts; ++i) {
 		btr_search_sys->hash_tables[i] =
-			ib_create((hash_size / btr_ahi_parts),
-				  LATCH_ID_HASH_TABLE_MUTEX,
-				  0, MEM_HEAP_FOR_BTR_SEARCH);
+			hash_create(hash_size / btr_ahi_parts);
+		btr_search_sys->hash_tables[i]->heap = mem_heap_create_typed(
+			std::min<ulong>(4096,
+					MEM_MAX_ALLOC_IN_BUF / 2
+					- MEM_BLOCK_HEADER_SIZE
+					- MEM_SPACE_NEEDED(0)),
+			MEM_HEAP_FOR_BTR_SEARCH);
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 		btr_search_sys->hash_tables[i]->adaptive = TRUE;
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
...
@@ -1523,11 +1523,10 @@ bool buf_pool_t::create()
   ut_a(srv_n_page_hash_locks != 0);
   ut_a(srv_n_page_hash_locks <= MAX_PAGE_HASH_LOCKS);
 
-  page_hash= ib_create(2 * curr_size,
-                       LATCH_ID_HASH_TABLE_RW_LOCK,
-                       srv_n_page_hash_locks, MEM_HEAP_FOR_PAGE_HASH);
+  page_hash= hash_create(2 * curr_size);
+  for (auto i= srv_n_page_hash_locks; i--; )
+    rw_lock_create(hash_table_locks_key, &page_hash_latches[i],
+                   SYNC_BUF_PAGE_HASH);
 
-  ut_ad(!page_hash_old);
   zip_hash= hash_create(2 * curr_size);
 
   last_printout_time= time(NULL);
@@ -1605,7 +1604,8 @@ void buf_pool_t::close()
   ut_free(chunks);
   chunks= nullptr;
-  ha_clear(page_hash);
+  for (auto i= srv_n_page_hash_locks; i--; )
+    rw_lock_free(&page_hash_latches[i]);
   hash_table_free(page_hash);
   hash_table_free(zip_hash);
@@ -1924,78 +1924,44 @@ inline bool buf_pool_t::withdraw_blocks()
 /** resize page_hash and zip_hash */
 static void buf_pool_resize_hash()
 {
-	hash_table_t*	new_hash_table;
-
-	ut_ad(buf_pool.page_hash_old == NULL);
-
-	/* recreate page_hash */
-	new_hash_table = ib_recreate(
-		buf_pool.page_hash, 2 * buf_pool.curr_size);
-
-	for (ulint i = 0; i < hash_get_n_cells(buf_pool.page_hash); i++) {
-		buf_page_t*	bpage;
-
-		bpage = static_cast<buf_page_t*>(
-			HASH_GET_FIRST(
-				buf_pool.page_hash, i));
-
-		while (bpage) {
-			buf_page_t*	prev_bpage = bpage;
-			ulint		fold;
-
-			ut_ad(bpage->in_page_hash);
-			bpage = static_cast<buf_page_t*>(
-				HASH_GET_NEXT(
-					hash, prev_bpage));
-
-			fold = prev_bpage->id().fold();
-
-			HASH_DELETE(buf_page_t, hash,
-				    buf_pool.page_hash, fold,
-				    prev_bpage);
-
-			HASH_INSERT(buf_page_t, hash,
-				    new_hash_table, fold,
-				    prev_bpage);
-		}
-	}
-
-	buf_pool.page_hash_old = buf_pool.page_hash;
-	buf_pool.page_hash = new_hash_table;
-
-	/* recreate zip_hash */
-	new_hash_table = hash_create(2 * buf_pool.curr_size);
-
-	for (ulint i = 0; i < hash_get_n_cells(buf_pool.zip_hash); i++) {
-		buf_page_t*	bpage;
-
-		bpage = static_cast<buf_page_t*>(
-			HASH_GET_FIRST(buf_pool.zip_hash, i));
-
-		while (bpage) {
-			buf_page_t*	prev_bpage = bpage;
-			ulint		fold;
-
-			bpage = static_cast<buf_page_t*>(
-				HASH_GET_NEXT(
-					hash, prev_bpage));
-
-			fold = BUF_POOL_ZIP_FOLD(
-				reinterpret_cast<buf_block_t*>(
-					prev_bpage));
-
-			HASH_DELETE(buf_page_t, hash,
-				    buf_pool.zip_hash, fold,
-				    prev_bpage);
-
-			HASH_INSERT(buf_page_t, hash,
				    new_hash_table, fold,
-				    prev_bpage);
-		}
-	}
-
-	hash_table_free(buf_pool.zip_hash);
-	buf_pool.zip_hash = new_hash_table;
+  hash_table_t *new_hash_table= hash_create(2 * buf_pool.curr_size);
+
+  for (ulint i= 0; i < hash_get_n_cells(buf_pool.page_hash); i++)
+  {
+    while (buf_page_t *bpage= static_cast<buf_page_t*>
+           (HASH_GET_FIRST(buf_pool.page_hash, i)))
+    {
+      buf_page_t *prev_bpage= bpage;
+      ut_ad(bpage->in_page_hash);
+      bpage= static_cast<buf_page_t*>(HASH_GET_NEXT(hash, prev_bpage));
+      const ulint fold= prev_bpage->id().fold();
+      HASH_DELETE(buf_page_t, hash, buf_pool.page_hash, fold, prev_bpage);
+      HASH_INSERT(buf_page_t, hash, new_hash_table, fold, prev_bpage);
+    }
+  }
+
+  std::swap(buf_pool.page_hash->array, new_hash_table->array);
+  buf_pool.page_hash->n_cells= new_hash_table->n_cells;
+  hash_table_free(new_hash_table);
+
+  /* recreate zip_hash */
+  new_hash_table= hash_create(2 * buf_pool.curr_size);
+
+  for (ulint i= 0; i < hash_get_n_cells(buf_pool.zip_hash); i++)
+  {
+    while (buf_page_t *bpage= static_cast<buf_page_t*>
+           (HASH_GET_FIRST(buf_pool.zip_hash, i)))
+    {
+      buf_page_t *prev_bpage= bpage;
+      bpage= static_cast<buf_page_t*>(HASH_GET_NEXT(hash, prev_bpage));
+      const ulint fold= BUF_POOL_ZIP_FOLD_BPAGE(prev_bpage);
+      HASH_DELETE(buf_page_t, hash, buf_pool.zip_hash, fold, prev_bpage);
+      HASH_INSERT(buf_page_t, hash, new_hash_table, fold, prev_bpage);
    }
+  }
+
+  hash_table_free(buf_pool.zip_hash);
+  buf_pool.zip_hash= new_hash_table;
 }
@@ -2163,7 +2129,9 @@ inline void buf_pool_t::resize()
   resizing.store(true, std::memory_order_relaxed);
 
   mutex_enter(&mutex);
-  page_hash_lock_all();
+  for (auto i= srv_n_page_hash_locks; i--; )
+    rw_lock_x_lock(&page_hash_latches[i]);
 
   chunk_t::map_reg = UT_NEW_NOKEY(chunk_t::map());
 
   /* add/delete chunks */
@@ -2312,13 +2280,9 @@ inline void buf_pool_t::resize()
     ib::info() << "hash tables were resized";
   }
 
-  page_hash_unlock_all();
   mutex_exit(&mutex);
-
-  if (page_hash_old != NULL) {
-    hash_table_free(page_hash_old);
-    page_hash_old = NULL;
-  }
+  for (auto i= srv_n_page_hash_locks; i--; )
+    rw_lock_x_unlock(&page_hash_latches[i]);
 
   UT_DELETE(chunk_map_old);
...
@@ -32,166 +32,6 @@ Created 8/22/1994 Heikki Tuuri
 #include "btr0sea.h"
 #include "page0page.h"
 
-/*************************************************************//**
-Creates a hash table with at least n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-@return own: created table */
-hash_table_t*
-ib_create(
-/*======*/
-	ulint	n,	/*!< in: number of array cells */
-	latch_id_t	id,	/*!< in: latch ID */
-	ulint	n_sync_obj,
-			/*!< in: number of mutexes to protect the
-			hash table: must be a power of 2, or 0 */
-	ulint	type)	/*!< in: type of datastructure for which
-			MEM_HEAP_FOR_PAGE_HASH */
-{
-	hash_table_t*	table;
-
-	ut_a(type == MEM_HEAP_FOR_BTR_SEARCH
-	     || type == MEM_HEAP_FOR_PAGE_HASH);
-
-	ut_ad(ut_is_2pow(n_sync_obj));
-
-	table = hash_create(n);
-
-	/* Creating MEM_HEAP_BTR_SEARCH type heaps can potentially fail,
-	but in practise it never should in this case, hence the asserts. */
-
-	if (n_sync_obj == 0) {
-		table->heap = mem_heap_create_typed(
-			std::min<ulong>(
-				4096,
-				MEM_MAX_ALLOC_IN_BUF / 2
-				- MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
-			type);
-		ut_a(table->heap);
-
-		return(table);
-	}
-
-	if (type == MEM_HEAP_FOR_PAGE_HASH) {
-		/* We create a hash table protected by rw_locks for
-		buf_pool.page_hash. */
-		hash_create_sync_obj(
-			table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
-	} else {
-		hash_create_sync_obj(
-			table, HASH_TABLE_SYNC_MUTEX, id, n_sync_obj);
-	}
-
-	table->heaps = static_cast<mem_heap_t**>(
-		ut_malloc_nokey(n_sync_obj * sizeof(void*)));
-
-	for (ulint i = 0; i < n_sync_obj; i++) {
-		table->heaps[i] = mem_heap_create_typed(
-			std::min<ulong>(
-				4096,
-				MEM_MAX_ALLOC_IN_BUF / 2
-				- MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
-			type);
-		ut_a(table->heaps[i]);
-	}
-
-	return(table);
-}
-
-/** Recreate a hash table with at least n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-The new cells are all cleared. The heaps are recreated.
-The sync objects are reused.
-@param[in,out]	table	hash table to be resuzed (to be freed later)
-@param[in]	n	number of array cells
-@return	resized new table */
-hash_table_t*
-ib_recreate(
-	hash_table_t*	table,
-	ulint		n)
-{
-	/* This function is for only page_hash for now */
-	ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK);
-	ut_ad(table->n_sync_obj > 0);
-
-	hash_table_t*	new_table = hash_create(n);
-
-	new_table->type = table->type;
-	new_table->n_sync_obj = table->n_sync_obj;
-	new_table->sync_obj = table->sync_obj;
-
-	for (ulint i = 0; i < table->n_sync_obj; i++) {
-		mem_heap_free(table->heaps[i]);
-	}
-	ut_free(table->heaps);
-
-	new_table->heaps = static_cast<mem_heap_t**>(
-		ut_malloc_nokey(new_table->n_sync_obj * sizeof(void*)));
-
-	for (ulint i = 0; i < new_table->n_sync_obj; i++) {
-		new_table->heaps[i] = mem_heap_create_typed(
-			std::min<ulong>(
-				4096,
-				MEM_MAX_ALLOC_IN_BUF / 2
-				- MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
-			MEM_HEAP_FOR_PAGE_HASH);
-		ut_a(new_table->heaps[i]);
-	}
-
-	return(new_table);
-}
-
-/*************************************************************//**
-Empties a hash table and frees the memory heaps. */
-void
-ha_clear(
-/*=====*/
-	hash_table_t*	table)	/*!< in, own: hash table */
-{
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-#ifdef BTR_CUR_HASH_ADAPT
-	ut_ad(!table->adaptive || btr_search_own_all(RW_LOCK_X));
-#endif /* BTR_CUR_HASH_ADAPT */
-
-	for (ulint i = 0; i < table->n_sync_obj; i++) {
-		mem_heap_free(table->heaps[i]);
-	}
-
-	ut_free(table->heaps);
-
-	switch (table->type) {
-	case HASH_TABLE_SYNC_MUTEX:
-		for (ulint i = 0; i < table->n_sync_obj; ++i) {
-			mutex_destroy(&table->sync_obj.mutexes[i]);
-		}
-		ut_free(table->sync_obj.mutexes);
-		table->sync_obj.mutexes = NULL;
-		break;
-
-	case HASH_TABLE_SYNC_RW_LOCK:
-		for (ulint i = 0; i < table->n_sync_obj; ++i) {
-			rw_lock_free(&table->sync_obj.rw_locks[i]);
-		}
-		ut_free(table->sync_obj.rw_locks);
-		table->sync_obj.rw_locks = NULL;
-		break;
-
-	case HASH_TABLE_SYNC_NONE:
-		/* do nothing */
-		break;
-	}
-
-	table->n_sync_obj = 0;
-	table->type = HASH_TABLE_SYNC_NONE;
-
-	/* Clear the hash table. */
-	ulint	n = hash_get_n_cells(table);
-
-	for (ulint i = 0; i < n; i++) {
-		hash_get_nth_cell(table, i)->node = NULL;
-	}
-}
-
 #ifdef BTR_CUR_HASH_ADAPT
 # if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 /** Maximum number of records in a page */
@@ -199,42 +39,6 @@ static const ulint MAX_N_POINTERS
 	= UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
 # endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
 
-# ifdef UNIV_DEBUG
-/** Assert that the synchronization object in a hash operation involving
-possible change in the hash table is held in exclusive mode */
-void hash_assert_can_modify(hash_table_t *table, ulint fold)
-{
-  switch (table->type) {
-  case HASH_TABLE_SYNC_MUTEX:
-    ut_ad(mutex_own(hash_get_mutex(table, fold)));
-    return;
-  case HASH_TABLE_SYNC_RW_LOCK:
-    ut_ad(buf_pool.page_hash_lock_own_flagged(fold, RW_LOCK_FLAG_X));
-    return;
-  case HASH_TABLE_SYNC_NONE:
-    return;
-  }
-  ut_ad(0);
-}
-
-/** Assert that the synchronization object in a hash operation involving
-possible change in the hash table is held in share dor exclusive mode */
-void hash_assert_can_search(hash_table_t *table, ulint fold)
-{
-  switch (table->type) {
-  case HASH_TABLE_SYNC_MUTEX:
-    ut_ad(mutex_own(hash_get_mutex(table, fold)));
-    return;
-  case HASH_TABLE_SYNC_RW_LOCK:
-    ut_ad(buf_pool.page_hash_lock_own_flagged(fold, RW_LOCK_FLAG_X |
-                                              RW_LOCK_FLAG_S));
-    return;
-  case HASH_TABLE_SYNC_NONE:
-    return;
-  }
-}
-# endif
-
 /*************************************************************//**
 Inserts an entry into a hash table. If an entry with the same fold number
 is found, its node is updated to point to the new data, and no new node
@@ -262,10 +66,10 @@ ha_insert_for_fold_func(
 	ut_ad(data);
 	ut_ad(table);
 	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
+	ut_ad(table->heap->type & MEM_HEAP_BTR_SEARCH);
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 	ut_a(block->frame == page_align(data));
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-	hash_assert_can_modify(table, fold);
 	ut_ad(btr_search_enabled);
 
 	hash = hash_calc_hash(fold, table);
@@ -296,16 +100,12 @@ ha_insert_for_fold_func(
 	}
 
 	/* We have to allocate a new chain node */
 	node = static_cast<ha_node_t*>(
-		mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t)));
+		mem_heap_alloc(table->heap, sizeof(ha_node_t)));
 
 	if (node == NULL) {
 		/* It was a btr search type memory heap and at the moment
 		no more memory could be allocated: return */
 
-		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);
-
 		return(FALSE);
 	}
@@ -342,10 +142,8 @@ ha_insert_for_fold_func(
 #ifdef UNIV_DEBUG
 /** Verify if latch corresponding to the hash table is x-latched
-@param[in]	table	hash table */
-static
-void
-ha_btr_search_latch_x_locked(const hash_table_t* table)
+@param table hash table */
+void ha_btr_search_latch_x_locked(const hash_table_t* table)
 {
 	ulint	i;
 
 	for (i = 0; i < btr_ahi_parts; ++i) {
@@ -372,13 +170,53 @@ ha_delete_hash_node(
 	ut_d(ha_btr_search_latch_x_locked(table));
 	ut_ad(btr_search_enabled);
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	if (table->adaptive) {
-		ut_a(del_node->block->frame == page_align(del_node->data));
-		ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
-	}
+	ut_a(table->adaptive);
+	ut_a(del_node->block->frame == page_align(del_node->data));
+	ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
 
-	HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
+	ha_node_t*	node;
+	const ulint	fold = del_node->fold;
+
+	HASH_DELETE(ha_node_t, next, table, fold, del_node);
+
+	ha_node_t*	top_node = (ha_node_t*) mem_heap_get_top(table->heap,
+								 sizeof(ha_node_t));
+
+	/* If the node to remove is not the top node in the heap, compact the
+	heap of nodes by moving the top node in the place of del_node. */
+
+	if (del_node != top_node) {
+		/* Copy the top node in place of del_node */
+
+		*del_node = *top_node;
+
+		hash_cell_t*	cell = hash_get_nth_cell(
+			table, hash_calc_hash(top_node->fold, table));
+
+		/* Look for the pointer to the top node, to update it */
+
+		if (cell->node == top_node) {
+			/* The top node is the first in the chain */
+			cell->node = del_node;
+		} else {
+			/* We have to look for the predecessor */
+			node = static_cast<ha_node_t*>(cell->node);
+
+			while (top_node != HASH_GET_NEXT(next, node)) {
+				node = static_cast<ha_node_t*>(
+					HASH_GET_NEXT(next, node));
+			}
+
+			/* Now we have the predecessor node */
+			node->next = del_node;
+		}
+	}
+
+	/* Free the space occupied by the top node */
+
+	mem_heap_free_top(table->heap, sizeof(ha_node_t));
 }
 
 /*********************************************************//**
@@ -400,7 +238,6 @@ ha_search_and_update_if_found_func(
 	ut_ad(table);
 	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	hash_assert_can_modify(table, fold);
 #if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 	ut_a(new_block->frame == page_align(new_data));
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -444,8 +281,8 @@ ha_remove_all_nodes_to_page(
 	ut_ad(table);
 	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	hash_assert_can_modify(table, fold);
 	ut_ad(btr_search_enabled);
+	ut_d(ha_btr_search_latch_x_locked(table));
 
 	node = ha_chain_get_first(table, fold);
...
@@ -28,47 +28,21 @@ Created 5/20/1997 Heikki Tuuri
 #include "mem0mem.h"
 #include "sync0sync.h"
 
-/*************************************************************//**
-Creates a hash table with >= n array cells. The actual number of cells is
-chosen to be a prime number slightly bigger than n.
-@return own: created table */
-hash_table_t*
-hash_create(
-/*========*/
-	ulint	n)	/*!< in: number of array cells */
+/**
+Create a hash table.
+@param n  the minimum number of hash array elements
+@return created table (with n_cells being a prime, at least n) */
+hash_table_t *hash_create(ulint n)
 {
-	hash_cell_t*	array;
-	ulint		prime;
-	hash_table_t*	table;
-
-	prime = ut_find_prime(n);
-
-	table = static_cast<hash_table_t*>(
-		ut_malloc_nokey(sizeof(hash_table_t)));
-
-	array = static_cast<hash_cell_t*>(
-		ut_malloc_nokey(sizeof(hash_cell_t) * prime));
-
-	/* The default type of hash_table is HASH_TABLE_SYNC_NONE i.e.:
-	the caller is responsible for access control to the table. */
-	table->type = HASH_TABLE_SYNC_NONE;
-	table->array = array;
-	table->n_cells = prime;
-#ifdef BTR_CUR_HASH_ADAPT
-# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
-	table->adaptive = FALSE;
-# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
-#endif /* BTR_CUR_HASH_ADAPT */
-	table->n_sync_obj = 0;
-	table->sync_obj.mutexes = NULL;
-	table->heaps = NULL;
-	table->heap = NULL;
-	ut_d(table->magic_n = HASH_TABLE_MAGIC_N);
-
-	/* Initialize the cell array */
-	hash_table_clear(table);
-
-	return(table);
+  ulint prime= ut_find_prime(n);
+
+  hash_table_t *table= static_cast<hash_table_t*>
+    (ut_zalloc_nokey(sizeof *table));
+  table->array= static_cast<hash_cell_t*>(ut_zalloc_nokey(sizeof(hash_cell_t) *
+                                                          prime));
+  table->n_cells= prime;
+  ut_d(table->magic_n= HASH_TABLE_MAGIC_N);
+  return table;
 }
 
 /*************************************************************//**
@@ -83,58 +57,3 @@ hash_table_free(
 	ut_free(table->array);
 	ut_free(table);
 }
-
-/*************************************************************//**
-Creates a sync object array to protect a hash table.
-::sync_obj can be mutexes or rw_locks depening on the type of
-hash table. */
-void
-hash_create_sync_obj(
-/*=================*/
-	hash_table_t*		table,	/*!< in: hash table */
-	enum hash_table_sync_t	type,	/*!< in: HASH_TABLE_SYNC_MUTEX
-					or HASH_TABLE_SYNC_RW_LOCK */
-	latch_id_t		id,	/*!< in: latch ID */
-	ulint			n_sync_obj)/*!< in: number of sync objects,
-					must be a power of 2 */
-{
-	ut_a(n_sync_obj > 0);
-	ut_a(ut_is_2pow(n_sync_obj));
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-
-	table->type = type;
-
-	switch (table->type) {
-	case HASH_TABLE_SYNC_MUTEX:
-		table->sync_obj.mutexes = static_cast<ib_mutex_t*>(
-			ut_malloc_nokey(n_sync_obj * sizeof(ib_mutex_t)));
-
-		for (ulint i = 0; i < n_sync_obj; i++) {
-			mutex_create(id, table->sync_obj.mutexes + i);
-		}
-
-		break;
-
-	case HASH_TABLE_SYNC_RW_LOCK: {
-
-		latch_level_t	level = sync_latch_get_level(id);
-
-		ut_a(level != SYNC_UNKNOWN);
-
-		table->sync_obj.rw_locks = static_cast<rw_lock_t*>(
-			ut_malloc_nokey(n_sync_obj * sizeof(rw_lock_t)));
-
-		for (ulint i = 0; i < n_sync_obj; i++) {
-			rw_lock_create(hash_table_locks_key,
-				       table->sync_obj.rw_locks + i, level);
-		}
-
-		break;
-	}
-
-	case HASH_TABLE_SYNC_NONE:
-		ut_error;
-	}
-
-	table->n_sync_obj = n_sync_obj;
-}
@@ -531,7 +531,6 @@ static PSI_mutex_info all_innodb_mutexes[] = {
 	PSI_KEY(fts_optimize_mutex),
 	PSI_KEY(fts_doc_id_mutex),
 	PSI_KEY(log_flush_order_mutex),
-	PSI_KEY(hash_table_mutex),
 	PSI_KEY(ibuf_bitmap_mutex),
 	PSI_KEY(ibuf_mutex),
 	PSI_KEY(ibuf_pessimistic_insert_mutex),
...
@@ -1612,43 +1612,40 @@ class buf_pool_t
   /** Get a page_hash latch. */
   rw_lock_t *hash_lock_get_low(ulint fold) const
   {
-    return page_hash->sync_obj.rw_locks +
-      hash_get_sync_obj_index(page_hash, fold);
+    return page_hash_latches +
+      ut_2pow_remainder(page_hash->calc_hash(fold),
+                        ulint{srv_n_page_hash_locks});
   }
-#ifdef UNIV_DEBUG
-  /** Check whether a page_hash latch is being held */
-  bool page_hash_lock_own_flagged(ulint fold, rw_lock_flags_t flagged) const
+private:
+  /** Get a page_hash latch. */
+  rw_lock_t *hash_lock_get_low(ulint fold, ulint n_cells) const
   {
-    return rw_lock_own_flagged(hash_lock_get_low(fold), flagged);
+    return page_hash_latches +
+      ut_2pow_remainder(ut_hash_ulint(fold, n_cells),
+                        ulint{srv_n_page_hash_locks});
   }
-#endif
+public:
 
   /** Acquire a page_hash bucket latch, tolerating concurrent resize()
   @tparam exclusive whether the latch is to be acquired exclusively
   @param fold hash bucket key */
   template<bool exclusive> rw_lock_t *page_hash_lock(ulint fold)
   {
-    rw_lock_t *latch= hash_lock_get_low(fold);
-    if (exclusive)
-      rw_lock_x_lock(latch);
-    else
-      rw_lock_s_lock(latch);
-    rw_lock_t *l;
-    while ((l= hash_lock_get_low(fold)) != latch)
+    for (;;)
     {
+      auto n_cells= page_hash->n_cells;
+      rw_lock_t *latch= hash_lock_get_low(fold, n_cells);
       if (exclusive)
-        rw_lock_x_unlock(latch);
+        rw_lock_x_lock(latch);
       else
-        rw_lock_s_unlock(latch);
-      /* FIXME: what if we resize() completes several times while we
-      are not holding any latch here? Is the latch guaranteed to be valid? */
+        rw_lock_s_lock(latch);
+      if (UNIV_LIKELY(n_cells == page_hash->n_cells))
+        return latch;
       if (exclusive)
-        rw_lock_x_lock(l);
+        rw_lock_x_unlock(latch);
      else
-        rw_lock_s_lock(l);
-      latch= l;
+        rw_lock_s_unlock(latch);
     }
-    return latch;
   }
 
   /** Look up a block descriptor.
@@ -1728,24 +1725,6 @@ class buf_pool_t
     return page_hash_get_locked<false,watch>(page_id, page_id.fold(), nullptr);
   }
 
-  /** Acquire exclusive latches on all page_hash buckets. */
-  void page_hash_lock_all() const
-  {
-    ut_ad(page_hash->magic_n == HASH_TABLE_MAGIC_N);
-    ut_ad(page_hash->type == HASH_TABLE_SYNC_RW_LOCK);
-    for (ulint i= 0; i < page_hash->n_sync_obj; i++)
-      rw_lock_x_lock(&page_hash->sync_obj.rw_locks[i]);
-  }
-
-  /** Release exclusive latches on all the page_hash buckets. */
-  void page_hash_unlock_all() const
-  {
-    ut_ad(page_hash->magic_n == HASH_TABLE_MAGIC_N);
-    ut_ad(page_hash->type == HASH_TABLE_SYNC_RW_LOCK);
-    for (ulint i = 0; i < page_hash->n_sync_obj; i++)
-      rw_lock_x_unlock(&page_hash->sync_obj.rw_locks[i]);
-  }
-
   /** Determine if a block is a sentinel for a buffer pool watch.
   @param bpage page descriptor
   @return whether bpage a sentinel for a buffer pool watch */
@@ -1894,10 +1873,11 @@ class buf_pool_t
   Atomic_counter<uint32_t> read_ahead_area;
 
   /** Hash table of file pages (buf_page_t::in_file() holds),
-  indexed by page_id_t. Protected by both mutex and hash_lock_get(id). */
+  indexed by page_id_t. Protected by both mutex and page_hash_latches[]. */
   hash_table_t *page_hash;
-  hash_table_t*	page_hash_old;	/*!< old pointer to page_hash to be
-				freed after resizing buffer pool */
+  /** Latches protecting page_hash */
+  mutable rw_lock_t page_hash_latches[MAX_PAGE_HASH_LOCKS];
   hash_table_t*	zip_hash;	/*!< hash table of buf_block_t blocks
 				whose frames are allocated to the
 				zip buddy system,
...
@@ -19,7 +19,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
 
 /**************************************************//**
 @file include/ha0ha.h
-The hash table with external chains
+The hash table interface for the adaptive hash index
 
 Created 8/18/1994 Heikki Tuuri
 *******************************************************/
@@ -81,41 +81,6 @@ updates the pointer to data if found.
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
 #endif /* BTR_CUR_HASH_ADAPT */
 
-/*************************************************************//**
-Creates a hash table with at least n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-@return own: created table */
-hash_table_t*
-ib_create(
-/*======*/
-	ulint	n,	/*!< in: number of array cells */
-	latch_id_t	id,	/*!< in: latch ID */
-	ulint	n_mutexes,/*!< in: number of mutexes to protect the
-			hash table: must be a power of 2, or 0 */
-	ulint	type);	/*!< in: type of datastructure for which
-			the memory heap is going to be used e.g.:
-			MEM_HEAP_FOR_BTR_SEARCH or
-			MEM_HEAP_FOR_PAGE_HASH */
-
-/** Recreate a hash table with at least n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-The new cells are all cleared. The heaps are recreated.
-The sync objects are reused.
-@param[in,out]	table	hash table to be resuzed (to be freed later)
-@param[in]	n	number of array cells
-@return	resized new table */
-hash_table_t*
-ib_recreate(
-	hash_table_t*	table,
-	ulint		n);
-
-/*************************************************************//**
-Empties a hash table and frees the memory heaps. */
-void
-ha_clear(
-/*=====*/
-	hash_table_t*	table);	/*!< in, own: hash table */
-
 #ifdef BTR_CUR_HASH_ADAPT
 /*************************************************************//**
 Inserts an entry into a hash table. If an entry with the same fold number
@@ -207,20 +172,8 @@ struct ha_node_t {
 #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
 	const rec_t*	data;	/*!< pointer to the data */
 };
-#endif /* BTR_CUR_HASH_ADAPT */
-
-#if defined UNIV_DEBUG && defined BTR_CUR_HASH_ADAPT
-/** Assert that the synchronization object in a hash operation involving
-possible change in the hash table is held in exclusive mode */
-void hash_assert_can_modify(hash_table_t *table, ulint fold);
-/** Assert that the synchronization object in a hash operation involving
-possible change in the hash table is held in share dor exclusive mode */
-void hash_assert_can_search(hash_table_t *table, ulint fold);
-#else /* UNIV_DEBUG */
-#define hash_assert_can_modify(t, f)
-#define hash_assert_can_search(t, f)
-#endif /* UNIV_DEBUG */
 
 #include "ha0ha.ic"
+#endif /* BTR_CUR_HASH_ADAPT */
 
 #endif
@@ -19,7 +19,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
 
 /********************************************************************//**
 @file include/ha0ha.ic
-The hash table with external chains
+The hash table interface for the adaptive hash index
 
 Created 8/18/1994 Heikki Tuuri
 *************************************************************************/
@@ -110,7 +110,6 @@ ha_search_and_get_data(
 	hash_table_t*	table,	/*!< in: hash table */
 	ulint		fold)	/*!< in: folded value of the searched data */
 {
-	hash_assert_can_search(table, fold);
 	ut_ad(btr_search_enabled);
 
 	for (const ha_node_t* node = ha_chain_get_first(table, fold);
@@ -139,8 +138,6 @@ ha_search_with_data(
 {
 	ha_node_t*	node;
 
-	hash_assert_can_search(table, fold);
-
 	ut_ad(btr_search_enabled);
 
 	node = ha_chain_get_first(table, fold);
@@ -165,6 +162,12 @@ ha_delete_hash_node(
 	hash_table_t*	table,		/*!< in: hash table */
 	ha_node_t*	del_node);	/*!< in: node to be deleted */
 
+#ifdef UNIV_DEBUG
+/** Verify if latch corresponding to the hash table is x-latched
+@param table hash table */
+void ha_btr_search_latch_x_locked(const hash_table_t* table);
+#endif /* UNIV_DEBUG */
+
 /*********************************************************//**
 Looks for an element when we know the pointer to the data, and deletes
 it from the hash table, if found.
@@ -179,7 +182,7 @@ ha_search_and_delete_if_found(
 {
 	ha_node_t*	node;
 
-	hash_assert_can_modify(table, fold);
+	ut_d(ha_btr_search_latch_x_locked(table));
 	ut_ad(btr_search_enabled);
 
 	node = ha_search_with_data(table, fold, data);
...
@@ -31,47 +31,19 @@ Created 5/20/1997 Heikki Tuuri
 #include "sync0rw.h"
 
 struct hash_table_t;
-struct hash_cell_t;
+struct hash_cell_t{
+	void*	node;	/*!< hash chain node, NULL if none */
+};
 
 typedef void*	hash_node_t;
 
 /* Fix Bug #13859: symbol collision between imap/mysql */
 #define hash_create hash0_create
 
-/* Differnt types of hash_table based on the synchronization
-method used for it. */
-enum hash_table_sync_t {
-	HASH_TABLE_SYNC_NONE = 0,	/*!< Don't use any internal
-					synchronization objects for
-					this hash_table. */
-	HASH_TABLE_SYNC_MUTEX,		/*!< Use mutexes to control
-					access to this hash_table. */
-	HASH_TABLE_SYNC_RW_LOCK		/*!< Use rw_locks to control
-					access to this hash_table. */
-};
-
-/*************************************************************//**
-Creates a hash table with >= n array cells. The actual number
-of cells is chosen to be a prime number slightly bigger than n.
-@return own: created table */
-hash_table_t*
-hash_create(
-/*========*/
-	ulint	n);	/*!< in: number of array cells */
-
-/*************************************************************//**
-Creates a sync object array array to protect a hash table.
-::sync_obj can be mutexes or rw_locks depening on the type of
-hash table. */
-void
-hash_create_sync_obj(
-/*=================*/
-	hash_table_t*		table,	/*!< in: hash table */
-	hash_table_sync_t	type,	/*!< in: HASH_TABLE_SYNC_MUTEX
-					or HASH_TABLE_SYNC_RW_LOCK */
-	latch_id_t		id,	/*!< in: mutex/rw_lock ID */
-	ulint			n_sync_obj);/*!< in: number of sync objects,
-					must be a power of 2 */
+/**
+Create a hash table.
+@param n  the minimum number of hash array elements
+@return created table (with n_cells being a prime, at least n) */
+hash_table_t *hash_create(ulint n);
 
 /*************************************************************//**
 Frees a hash table. */
@@ -79,20 +51,8 @@ void
 hash_table_free(
 /*============*/
 	hash_table_t*	table);	/*!< in, own: hash table */
-/**************************************************************//**
-Calculates the hash value from a folded value.
-@return hashed value */
-UNIV_INLINE
-ulint
-hash_calc_hash(
-/*===========*/
-	ulint		fold,	/*!< in: folded value */
-	hash_table_t*	table);	/*!< in: hash table */
-/********************************************************************//**
-Assert that the mutex for the table is held */
-#define HASH_ASSERT_OWN(TABLE, FOLD)			\
-	ut_ad((TABLE)->type != HASH_TABLE_SYNC_MUTEX	\
-	      || (mutex_own(hash_get_mutex((TABLE), FOLD))));
+
+#define hash_calc_hash(FOLD, TABLE) (TABLE)->calc_hash(FOLD)
 
 /*******************************************************************//**
 Inserts a struct to a hash table. */
@@ -101,8 +61,6 @@ Inserts a struct to a hash table. */
 do {\
 	hash_cell_t*	cell3333;\
 	TYPE*		struct3333;\
-\
-	HASH_ASSERT_OWN(TABLE, FOLD)\
 \
 	(DATA)->NAME = NULL;\
 \
@@ -130,8 +88,6 @@ do { \
 	hash_cell_t*	cell3333; \
 	TYPE*		struct3333; \
 \
-	HASH_ASSERT_OWN(TABLE, FOLD) \
-\
 	(DATA)->NAME = NULL; \
 \
 	cell3333 = hash_get_nth_cell(TABLE, hash_calc_hash(FOLD, TABLE));\
@@ -162,8 +118,6 @@ Deletes a struct from a hash table. */
 do {\
 	hash_cell_t*	cell3333;\
 	TYPE*		struct3333;\
-\
-	HASH_ASSERT_OWN(TABLE, FOLD)\
 \
 	cell3333 = hash_get_nth_cell(TABLE, hash_calc_hash(FOLD, TABLE));\
 \
@@ -211,9 +165,6 @@ Gets the next struct in a hash chain, NULL if none. */
 Looks for a struct in a hash table. */
 #define HASH_SEARCH(NAME, TABLE, FOLD, TYPE, DATA, ASSERTION, TEST)\
 {\
-\
-	HASH_ASSERT_OWN(TABLE, FOLD)\
-\
 	(DATA) = (TYPE) HASH_GET_FIRST(TABLE, hash_calc_hash(FOLD, TABLE));\
 	HASH_ASSERT_VALID(DATA);\
 \
@@ -280,65 +231,6 @@ ulint
 hash_get_n_cells(
 /*=============*/
 	hash_table_t*	table);	/*!< in: table */
-/*******************************************************************//**
-Deletes a struct which is stored in the heap of the hash table, and compacts
-the heap. The fold value must be stored in the struct NODE in a field named
-'fold'. */
-#define HASH_DELETE_AND_COMPACT(TYPE, NAME, TABLE, NODE)\
-do {\
-	TYPE*		node111;\
-	TYPE*		top_node111;\
-	hash_cell_t*	cell111;\
-	ulint		fold111;\
-\
-	fold111 = (NODE)->fold;\
-\
-	HASH_DELETE(TYPE, NAME, TABLE, fold111, NODE);\
-\
-	top_node111 = (TYPE*) mem_heap_get_top(\
-			hash_get_heap(TABLE, fold111),\
-			sizeof(TYPE));\
-\
-	/* If the node to remove is not the top node in the heap, compact the\
-	heap of nodes by moving the top node in the place of NODE. */\
-\
-	if (NODE != top_node111) {\
-\
-		/* Copy the top node in place of NODE */\
-\
-		*(NODE) = *top_node111;\
-\
-		cell111 = hash_get_nth_cell(TABLE,\
-			hash_calc_hash(top_node111->fold, TABLE));\
-\
-		/* Look for the pointer to the top node, to update it */\
-\
-		if (cell111->node == top_node111) {\
-			/* The top node is the first in the chain */\
-\
-			cell111->node = NODE;\
-		} else {\
-			/* We have to look for the predecessor of the top\
-			node */\
-			node111 = static_cast<TYPE*>(cell111->node);\
-\
-			while (top_node111 != HASH_GET_NEXT(NAME, node111)) {\
-\
-				node111 = static_cast<TYPE*>(\
-						HASH_GET_NEXT(NAME, node111));\
-			}\
-\
-			/* Now we have the predecessor node */\
-\
-			node111->NAME = NODE;\
-		}\
-	}\
-\
-	/* Free the space occupied by the top node */\
-\
-	mem_heap_free_top(hash_get_heap(TABLE, fold111), sizeof(TYPE));\
-} while (0)
 
 /****************************************************************//**
 Move all hash table entries from OLD_TABLE to NEW_TABLE. */
@@ -367,59 +259,8 @@ do {\
 	}\
 } while (0)
 
-/************************************************************//**
-Gets the sync object index for a fold value in a hash table.
-@return index */
-UNIV_INLINE
-ulint
-hash_get_sync_obj_index(
-/*====================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold);	/*!< in: fold */
-/************************************************************//**
-Gets the nth heap in a hash table.
-@return mem heap */
-UNIV_INLINE
-mem_heap_t*
-hash_get_nth_heap(
-/*==============*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		i);	/*!< in: index of the heap */
-/************************************************************//**
-Gets the heap for a fold value in a hash table.
-@return mem heap */
-UNIV_INLINE
-mem_heap_t*
-hash_get_heap(
-/*==========*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold);	/*!< in: fold */
-/************************************************************//**
-Gets the nth mutex in a hash table.
-@return mutex */
-UNIV_INLINE
-ib_mutex_t*
-hash_get_nth_mutex(
-/*===============*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		i);	/*!< in: index of the mutex */
-/************************************************************//**
-Gets the mutex for a fold value in a hash table.
-@return mutex */
-UNIV_INLINE
-ib_mutex_t*
-hash_get_mutex(
-/*===========*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold);	/*!< in: fold */
-
-struct hash_cell_t{
-	void*	node;	/*!< hash chain node, NULL if none */
-};
-
 /* The hash table structure */
 struct hash_table_t {
-	enum hash_table_sync_t	type;	/*<! type of hash_table. */
 #ifdef BTR_CUR_HASH_ADAPT
 # if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
 	ibool			adaptive;/* TRUE if this is the hash
@@ -429,31 +270,13 @@ struct hash_table_t {
 #endif /* BTR_CUR_HASH_ADAPT */
 	ulint			n_cells;/* number of cells in the hash table */
 	hash_cell_t*		array;	/*!< pointer to cell array */
-	ulint			n_sync_obj;/* if sync_objs != NULL, then
-					the number of either the number
-					of mutexes or the number of
-					rw_locks depending on the type.
-					Must be a power of 2 */
-	union {
-		ib_mutex_t*	mutexes;/* NULL, or an array of mutexes
-					used to protect segments of the
-					hash table */
-		rw_lock_t*	rw_locks;/* NULL, or an array of rw_locks
-					used to protect segments of the
-					buf_pool.page_hash */
-	} sync_obj;
-
-	mem_heap_t**		heaps;	/*!< if this is non-NULL, hash
-					chain nodes for external chaining
-					can be allocated from these memory
-					heaps; there are then n_mutexes
-					many of these heaps */
 	mem_heap_t*		heap;
 #ifdef UNIV_DEBUG
 	ulint			magic_n;
 # define HASH_TABLE_MAGIC_N	76561114
 #endif /* UNIV_DEBUG */
+
+	ulint calc_hash(ulint fold) const { return ut_hash_ulint(fold, n_cells); }
 };
 
 #include "hash0hash.ic"
...
@@ -68,116 +68,3 @@ hash_get_n_cells(
 	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
 	return(table->n_cells);
 }
-
-/**************************************************************//**
-Calculates the hash value from a folded value.
-@return hashed value */
-UNIV_INLINE
-ulint
-hash_calc_hash(
-/*===========*/
-	ulint		fold,	/*!< in: folded value */
-	hash_table_t*	table)	/*!< in: hash table */
-{
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	return(ut_hash_ulint(fold, table->n_cells));
-}
-
-/************************************************************//**
-Gets the sync object index for a fold value in a hash table.
-@return index */
-UNIV_INLINE
-ulint
-hash_get_sync_obj_index(
-/*====================*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold)	/*!< in: fold */
-{
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_ad(table->type != HASH_TABLE_SYNC_NONE);
-	ut_ad(ut_is_2pow(table->n_sync_obj));
-	return(ut_2pow_remainder(hash_calc_hash(fold, table),
-				 table->n_sync_obj));
-}
-
-/************************************************************//**
-Gets the nth heap in a hash table.
-@return mem heap */
-UNIV_INLINE
-mem_heap_t*
-hash_get_nth_heap(
-/*==============*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		i)	/*!< in: index of the heap */
-{
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_ad(table->type != HASH_TABLE_SYNC_NONE);
-	ut_ad(i < table->n_sync_obj);
-
-	return(table->heaps[i]);
-}
-
-/************************************************************//**
-Gets the heap for a fold value in a hash table.
-@return mem heap */
-UNIV_INLINE
-mem_heap_t*
-hash_get_heap(
-/*==========*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold)	/*!< in: fold */
-{
-	ulint	i;
-
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-
-	if (table->heap) {
-		return(table->heap);
-	}
-
-	i = hash_get_sync_obj_index(table, fold);
-
-	return(hash_get_nth_heap(table, i));
-}
-
-/************************************************************//**
-Gets the nth mutex in a hash table.
-@return mutex */
-UNIV_INLINE
-ib_mutex_t*
-hash_get_nth_mutex(
-/*===============*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		i)	/*!< in: index of the mutex */
-{
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-	ut_ad(table->type == HASH_TABLE_SYNC_MUTEX);
-	ut_ad(i < table->n_sync_obj);
-
-	return(table->sync_obj.mutexes + i);
-}
-
-/************************************************************//**
-Gets the mutex for a fold value in a hash table.
-@return mutex */
-UNIV_INLINE
-ib_mutex_t*
-hash_get_mutex(
-/*===========*/
-	hash_table_t*	table,	/*!< in: hash table */
-	ulint		fold)	/*!< in: fold */
-{
-	ulint	i;
-
-	ut_ad(table);
-	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-
-	i = hash_get_sync_obj_index(table, fold);
-
-	return(hash_get_nth_mutex(table, i));
-}
@@ -58,7 +58,6 @@ buffer pool; the latter method is used for very big heaps */
 
 /** Different type of heaps in terms of which datastructure is using them */
 #define MEM_HEAP_FOR_BTR_SEARCH	(MEM_HEAP_BTR_SEARCH | MEM_HEAP_BUFFER)
-#define MEM_HEAP_FOR_PAGE_HASH	(MEM_HEAP_DYNAMIC)
 #define MEM_HEAP_FOR_LOCK_HEAP	(MEM_HEAP_BUFFER)
 
 /** The following start size is used for the first block in the memory heap if
@@ -62,7 +62,6 @@ extern mysql_pfs_key_t fts_delete_mutex_key;
 extern mysql_pfs_key_t fts_optimize_mutex_key;
 extern mysql_pfs_key_t fts_doc_id_mutex_key;
 extern mysql_pfs_key_t fts_pll_tokenize_mutex_key;
-extern mysql_pfs_key_t hash_table_mutex_key;
 extern mysql_pfs_key_t ibuf_bitmap_mutex_key;
 extern mysql_pfs_key_t ibuf_mutex_key;
 extern mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key;
...
@@ -295,7 +295,6 @@ enum latch_id_t {
 	LATCH_ID_FTS_OPTIMIZE,
 	LATCH_ID_FTS_DOC_ID,
 	LATCH_ID_FTS_PLL_TOKENIZE,
-	LATCH_ID_HASH_TABLE_MUTEX,
 	LATCH_ID_IBUF_BITMAP,
 	LATCH_ID_IBUF,
 	LATCH_ID_IBUF_PESSIMISTIC_INSERT,
...
@@ -970,10 +970,7 @@ srv_printf_innodb_monitor(
 		const hash_table_t* table = btr_search_sys->hash_tables[i];
 
 		ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
-		/* this is only used for buf_pool.page_hash */
-		ut_ad(!table->heaps);
-		/* this is used for the adaptive hash index */
-		ut_ad(table->heap);
+		ut_ad(table->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
 		const mem_heap_t* heap = table->heap;
 		/* The heap may change during the following call,
@@ -1140,10 +1137,7 @@ srv_export_innodb_status(void)
 		ut_ad(ht);
 		ut_ad(ht->heap);
-		/* Multiple mutexes/heaps are currently never used for adaptive
-		hash index tables. */
-		ut_ad(!ht->n_sync_obj);
-		ut_ad(!ht->heaps);
+		ut_ad(ht->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
 
 		mem_adaptive_hash += mem_heap_get_size(ht->heap)
 			+ ht->n_cells * sizeof(hash_cell_t);
...
@@ -1294,9 +1294,6 @@ sync_latch_meta_init()
 	LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
 			fts_pll_tokenize_mutex_key);
 
-	LATCH_ADD_MUTEX(HASH_TABLE_MUTEX, SYNC_BUF_PAGE_HASH,
-			hash_table_mutex_key);
-
 	LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
 			ibuf_bitmap_mutex_key);
...
@@ -49,7 +49,6 @@ mysql_pfs_key_t fts_delete_mutex_key;
 mysql_pfs_key_t fts_optimize_mutex_key;
 mysql_pfs_key_t fts_doc_id_mutex_key;
 mysql_pfs_key_t fts_pll_tokenize_mutex_key;
-mysql_pfs_key_t hash_table_mutex_key;
 mysql_pfs_key_t ibuf_bitmap_mutex_key;
 mysql_pfs_key_t ibuf_mutex_key;
 mysql_pfs_key_t ibuf_pessimistic_insert_mutex_key;
...