Commit dfb001ed authored by Sergei Golubchik

percona-server-5.6.22-72.0

parent 67da9e81
@@ -294,7 +294,6 @@ UNIV_INTERN mysql_pfs_key_t buf_block_debug_latch_key;
 #ifdef UNIV_PFS_MUTEX
 UNIV_INTERN mysql_pfs_key_t	buffer_block_mutex_key;
-UNIV_INTERN mysql_pfs_key_t	buf_pool_mutex_key;
 UNIV_INTERN mysql_pfs_key_t	buf_pool_zip_mutex_key;
 UNIV_INTERN mysql_pfs_key_t	buf_pool_flush_state_mutex_key;
 UNIV_INTERN mysql_pfs_key_t	buf_pool_LRU_list_mutex_key;
@@ -1735,16 +1734,12 @@ buf_pool_watch_set(
 		ut_ad(!bpage->in_page_hash);
 		ut_ad(bpage->buf_fix_count == 0);
 
-		mutex_enter(&buf_pool->zip_mutex);
 		bpage->state = BUF_BLOCK_ZIP_PAGE;
 		bpage->space = static_cast<ib_uint32_t>(space);
 		bpage->offset = static_cast<ib_uint32_t>(offset);
 		bpage->buf_fix_count = 1;
 		bpage->buf_pool_index = buf_pool_index(buf_pool);
-		mutex_exit(&buf_pool->zip_mutex);
 
 		ut_d(bpage->in_page_hash = TRUE);
 		HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
 			    fold, bpage);
@@ -1796,7 +1791,6 @@ buf_pool_watch_remove(
 #endif /* UNIV_SYNC_DEBUG */
 
 	ut_ad(buf_page_get_state(watch) == BUF_BLOCK_ZIP_PAGE);
-	ut_ad(buf_own_zip_mutex_for_page(watch));
 
 	HASH_DELETE(buf_page_t, hash, buf_pool->page_hash, fold, watch);
 	ut_d(watch->in_page_hash = FALSE);
@@ -1839,9 +1833,7 @@ buf_pool_watch_unset(
 #endif /* PAGE_ATOMIC_REF_COUNT */
 
 	if (bpage->buf_fix_count == 0) {
-		mutex_enter(&buf_pool->zip_mutex);
 		buf_pool_watch_remove(buf_pool, fold, bpage);
-		mutex_exit(&buf_pool->zip_mutex);
 	}
 }
@@ -4421,7 +4413,7 @@ buf_page_io_complete(
 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
 	    buf_page_get_flush_type(bpage) == BUF_FLUSH_LRU)) {
 
-		have_LRU_mutex = TRUE; /* optimistic */
+		have_LRU_mutex = true; /* optimistic */
 	}
 retry_mutex:
 	if (have_LRU_mutex) {
@@ -4441,7 +4433,7 @@ buf_page_io_complete(
 		    && !have_LRU_mutex)) {
 
 			mutex_exit(block_mutex);
-			have_LRU_mutex = TRUE;
+			have_LRU_mutex = true;
 			goto retry_mutex;
 		}
......
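The buf_page_io_complete() hunks above keep the "optimistic" scheme: guess up front whether the LRU list mutex will be needed, and if the guess turns out wrong while only the block mutex is held, back off and retry so the lock order (list mutex before block mutex) is respected. Below is a minimal, self-contained sketch of that pattern; Pool, needs_lru_reposition and io_complete are hypothetical stand-ins, not InnoDB's types or API.

#include <mutex>

struct Pool {
    std::mutex lru_list_mutex;  // stand-in for buf_pool->LRU_list_mutex
    std::mutex block_mutex;     // stand-in for the per-block mutex
};

// Whether the page must be repositioned in the LRU list is only known
// reliably once the block mutex is held.
static bool needs_lru_reposition(const Pool&) { return true; }

void io_complete(Pool& pool, bool likely_lru_flush) {
    bool have_lru_mutex = false;

    if (likely_lru_flush) {
        have_lru_mutex = true;  // optimistic: probably needed
    }
retry_mutex:
    if (have_lru_mutex) {
        pool.lru_list_mutex.lock();  // list mutex first, per lock order
    }
    pool.block_mutex.lock();

    if (needs_lru_reposition(pool) && !have_lru_mutex) {
        // Guessed wrong: the list mutex is needed but not held. Taking it
        // now would invert the lock order, so release and start over.
        pool.block_mutex.unlock();
        have_lru_mutex = true;
        goto retry_mutex;
    }

    // ... do the LRU/flush bookkeeping under both mutexes ...

    pool.block_mutex.unlock();
    if (have_lru_mutex) {
        pool.lru_list_mutex.unlock();
    }
}

int main() {
    Pool p;
    io_complete(p, true);
}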
@@ -1098,8 +1098,8 @@ buf_flush_page(
 # if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
 /********************************************************************//**
 Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: block->mutex must be held upon entering this function, and it will be
-released by this function after flushing. This is loosely based on
+NOTE: block and LRU list mutexes must be held upon entering this function, and
+they will be released by this function after flushing. This is loosely based on
 buf_flush_batch() and buf_flush_page().
 @return TRUE if the page was flushed and the mutexes released */
 UNIV_INTERN
@@ -1653,6 +1653,8 @@ buf_do_LRU_batch(
 	flush_counters_t*	n)	/*!< out: flushed/evicted page
 					counts */
 {
+	ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
+
 	if (buf_LRU_evict_from_unzip_LRU(buf_pool)) {
 		n->unzip_LRU_evicted
 			= buf_free_from_unzip_LRU_list_batch(buf_pool, max);
......
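Two related conventions appear in the hunks above: the corrected comment states that the function is entered with both the block and LRU list mutexes held and releases them itself after flushing, and buf_do_LRU_batch() now asserts its latching precondition with ut_ad(mutex_own(...)). A sketch of that caller-to-callee lock handoff using std::unique_lock as a stand-in for InnoDB's mutexes; flush_page and its signature are illustrative, not the real API.

#include <cassert>
#include <mutex>
#include <utility>

std::mutex lru_list_mutex;  // stand-in for buf_pool->LRU_list_mutex
std::mutex block_mutex;     // stand-in for block->mutex

// Both locks arrive owned: the analogue of the documented precondition,
// which the real code checks in debug builds via ut_ad(mutex_own(...)).
void flush_page(std::unique_lock<std::mutex> lru_lock,
                std::unique_lock<std::mutex> block_lock) {
    assert(lru_lock.owns_lock() && block_lock.owns_lock());

    // ... issue the asynchronous page write while both latches are held ...

    block_lock.unlock();  // released by the callee, as the comment promises
    lru_lock.unlock();
}

int main() {
    std::unique_lock<std::mutex> lru(lru_list_mutex);
    std::unique_lock<std::mutex> blk(block_mutex);
    flush_page(std::move(lru), std::move(blk));  // ownership handed off
}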
@@ -526,7 +526,7 @@ buf_flush_or_remove_page(
 			mutex_exit(block_mutex);
 
-			*must_restart = TRUE;
+			*must_restart = true;
 			processed = false;
 
 		} else if (!flush) {
......
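The *must_restart change above, like the have_LRU_mutex changes earlier, is part of a sweep replacing the legacy TRUE/FALSE macros with C++ bool literals where the variable really is a bool. In InnoDB, TRUE is an integer constant that pairs with the integer typedef ibool, so assigning it to a bool relies on implicit conversion. A compressed illustration; the typedef below imitates univ.i's style rather than quoting it.

typedef unsigned long ibool;  // legacy InnoDB-style integer boolean
#define TRUE  1
#define FALSE 0

void flag_restart(bool* must_restart) {
    *must_restart = TRUE;  // compiles, but stores an int constant into a bool
    *must_restart = true;  // the idiomatic C++ spelling the commit adopts
}

int main() {
    bool restart = FALSE;
    flag_restart(&restart);
    return restart ? 0 : 1;
}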
@@ -640,9 +640,9 @@ buf_read_ahead_linear(
 	fail_count = 0;
 
-	for (i = low; i < high; i++) {
-		prio_rw_lock_t*	hash_lock;
+	prio_rw_lock_t*	hash_lock;
 
+	for (i = low; i < high; i++) {
 		bpage = buf_page_hash_get_s_locked(buf_pool, space, i,
 						   &hash_lock);
@@ -691,7 +691,7 @@ buf_read_ahead_linear(
 	/* If we got this far, we know that enough pages in the area have
 	been accessed in the right order: linear read-ahead can be sensible */
 
-	bpage = buf_page_hash_get(buf_pool, space, offset);
+	bpage = buf_page_hash_get_s_locked(buf_pool, space, offset, &hash_lock);
 
 	if (bpage == NULL) {
@@ -719,6 +719,8 @@ buf_read_ahead_linear(
 	pred_offset = fil_page_get_prev(frame);
 	succ_offset = fil_page_get_next(frame);
 
+	rw_lock_s_unlock(hash_lock);
+
 	if ((offset == low) && (succ_offset == offset + 1)) {
 
 		/* This is ok, we can continue */
......
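The buf_read_ahead_linear() hunks switch the second lookup to buf_page_hash_get_s_locked() and keep the returned hash latch held across the reads of the frame's predecessor/successor offsets, releasing it only afterwards. A stand-alone sketch of that shape, with std::shared_mutex and an unordered_map standing in for prio_rw_lock_t and buf_pool->page_hash; all names here are illustrative.

#include <cstdint>
#include <shared_mutex>
#include <unordered_map>

struct Page {
    uint32_t pred_offset;  // stand-in for fil_page_get_prev(frame)
    uint32_t succ_offset;  // stand-in for fil_page_get_next(frame)
};

std::shared_mutex hash_lock;                   // stand-in for the hash latch
std::unordered_map<uint32_t, Page> page_hash;  // stand-in for page_hash

bool get_neighbors(uint32_t offset, uint32_t* pred, uint32_t* succ) {
    std::shared_lock<std::shared_mutex> s(hash_lock);  // s-latch the lookup
    auto it = page_hash.find(offset);
    if (it == page_hash.end()) {
        return false;  // page left the pool; nothing to read
    }
    // Read the linked offsets while the s-latch pins the entry...
    *pred = it->second.pred_offset;
    *succ = it->second.succ_offset;
    return true;
    // ...and only then release (the rw_lock_s_unlock() in the hunk above).
}

int main() {
    page_hash[7] = Page{6, 8};
    uint32_t pred, succ;
    return get_neighbors(7, &pred, &succ) ? 0 : 1;
}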
@@ -340,7 +340,6 @@ static PSI_mutex_info all_innodb_mutexes[] = {
 # ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
 	{&buffer_block_mutex_key, "buffer_block_mutex", 0},
 # endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
-	{&buf_pool_mutex_key, "buf_pool_mutex", 0},
 	{&buf_pool_zip_mutex_key, "buf_pool_zip_mutex", 0},
 	{&buf_pool_LRU_list_mutex_key, "buf_pool_LRU_list_mutex", 0},
 	{&buf_pool_free_list_mutex_key, "buf_pool_free_list_mutex", 0},
......
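For context on the all_innodb_mutexes[] row removed above: each {key, name, flags} entry is handed to Performance Schema at startup, which fills in the numeric key used when creating instrumented mutexes, so dropping buf_pool_mutex_key here matches its removal from the key definitions and externs elsewhere in this commit. A toy model of that registration flow; the registry below is a stand-in for the real PSI service, not its API.

#include <cstddef>
#include <cstdio>

typedef unsigned int mysql_pfs_key_t;

struct PSI_mutex_info_sketch {
    mysql_pfs_key_t* key;   // filled in by registration
    const char*      name;  // instrument name shown in P_S tables
    int              flags;
};

static mysql_pfs_key_t buf_pool_zip_mutex_key;
static mysql_pfs_key_t buf_pool_LRU_list_mutex_key;

static PSI_mutex_info_sketch mutexes[] = {
    {&buf_pool_zip_mutex_key, "buf_pool_zip_mutex", 0},
    {&buf_pool_LRU_list_mutex_key, "buf_pool_LRU_list_mutex", 0},
};

void register_mutexes(PSI_mutex_info_sketch* info, std::size_t count) {
    static mysql_pfs_key_t next_key = 1;
    for (std::size_t i = 0; i < count; i++) {
        *info[i].key = next_key++;  // toy: the real PSI assigns the keys
        std::printf("registered %s as key %u\n", info[i].name, *info[i].key);
    }
}

int main() {
    register_mutexes(mutexes, sizeof(mutexes) / sizeof(mutexes[0]));
}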
@@ -5279,11 +5279,6 @@ i_s_innodb_fill_buffer_pool(
 		info_buffer = (buf_page_info_t*) mem_heap_zalloc(
 			heap, mem_size);
 
-		/* Obtain appropriate mutexes. Since this is diagnostic
-		buffer pool info printout, we are not required to
-		preserve the overall consistency, so we can
-		release mutex periodically */
-
 		/* GO through each block in the chunk */
 		for (n_blocks = num_to_process; n_blocks--; block++) {
 			i_s_innodb_buffer_page_get_info(
......
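The comment deleted above described why the INFORMATION_SCHEMA buffer-pool scan may release its mutex between chunks: the printout is diagnostic, so a fully consistent snapshot is not required. A generic sketch of that scan-in-chunks pattern; CHUNK, blocks and the mutex are all stand-ins, not the i_s.cc code.

#include <algorithm>
#include <cstddef>
#include <mutex>
#include <vector>

std::mutex pool_mutex;              // stand-in for a buffer-pool latch
std::vector<int> blocks(10000, 0);  // stand-in for the chunk's block array

void fill_diagnostic_info(std::vector<int>& out) {
    const std::size_t CHUNK = 1000;  // blocks scanned per latch hold
    for (std::size_t start = 0; start < blocks.size(); start += CHUNK) {
        std::lock_guard<std::mutex> guard(pool_mutex);  // re-taken per chunk
        std::size_t end = std::min(start + CHUNK, blocks.size());
        for (std::size_t i = start; i < end; i++) {
            out.push_back(blocks[i]);  // snapshot may be mildly inconsistent
        }
        // guard releases here, letting writers run between chunks
    }
}

int main() {
    std::vector<int> out;
    fill_diagnostic_info(out);
    return out.size() == blocks.size() ? 0 : 1;
}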
@@ -78,8 +78,8 @@ buf_flush_init_for_writing(
 # if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
 /********************************************************************//**
 Writes a flushable page asynchronously from the buffer pool to a file.
-NOTE: block->mutex must be held upon entering this function, and they will be
-released by this function after flushing. This is loosely based on
+NOTE: block and LRU list mutexes must be held upon entering this function, and
+they will be released by this function after flushing. This is loosely based on
 buf_flush_batch() and buf_flush_page().
 @return TRUE if the page was flushed and the mutexes released */
 UNIV_INTERN
......
@@ -71,7 +71,6 @@ instrumentation due to their large number of instances. */
 /* Key defines to register InnoDB mutexes with performance schema */
 extern mysql_pfs_key_t	autoinc_mutex_key;
 extern mysql_pfs_key_t	buffer_block_mutex_key;
-extern mysql_pfs_key_t	buf_pool_mutex_key;
 extern mysql_pfs_key_t	buf_pool_zip_mutex_key;
 extern mysql_pfs_key_t	buf_pool_LRU_list_mutex_key;
 extern mysql_pfs_key_t	buf_pool_free_list_mutex_key;
......
@@ -47,7 +47,7 @@ Created 1/20/1994 Heikki Tuuri
 #define INNODB_VERSION_BUGFIX	MYSQL_VERSION_PATCH
 
 #ifndef PERCONA_INNODB_VERSION
-#define PERCONA_INNODB_VERSION 71.0
+#define PERCONA_INNODB_VERSION 72.0
 #endif
 
 /* Enable UNIV_LOG_ARCHIVE in XtraDB */
......
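PERCONA_INNODB_VERSION is a bare numeric token; it ends up in a "5.6.22-72.0" style version string through the usual two-step macro stringification. A self-contained illustration of that technique; the XSTR/STR names and the hardcoded "5.6.22-" prefix are assumptions for the example, not univ.i's exact macros.

#include <cstdio>

#define PERCONA_INNODB_VERSION 72.0
#define XSTR(x) STR(x)  // expand the argument first...
#define STR(x)  #x      // ...then stringify the expansion

int main() {
    // Prints: XtraDB 5.6.22-72.0
    std::printf("XtraDB 5.6.22-%s\n", XSTR(PERCONA_INNODB_VERSION));
}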
@@ -128,9 +128,19 @@ UNIV_INTERN enum srv_shutdown_state srv_shutdown_state = SRV_SHUTDOWN_NONE;
 static os_file_t	files[1000];
 
 /** io_handler_thread parameters for thread identification */
-static ulint		n[SRV_MAX_N_IO_THREADS + 6];
-/** io_handler_thread identifiers, 32 is the maximum number of purge threads */
-static os_thread_id_t	thread_ids[SRV_MAX_N_IO_THREADS + 6
+static ulint		n[SRV_MAX_N_IO_THREADS];
+/** io_handler_thread identifiers, 32 is the maximum number of purge threads.
+The extra elements at the end are allocated as follows:
+SRV_MAX_N_IO_THREADS + 1: srv_master_thread
+SRV_MAX_N_IO_THREADS + 2: lock_wait_timeout_thread
+SRV_MAX_N_IO_THREADS + 3: srv_error_monitor_thread
+SRV_MAX_N_IO_THREADS + 4: srv_monitor_thread
+SRV_MAX_N_IO_THREADS + 5: srv_redo_log_follow_thread
+SRV_MAX_N_IO_THREADS + 6: srv_purge_coordinator_thread
+SRV_MAX_N_IO_THREADS + 7: srv_worker_thread
+...
+SRV_MAX_N_IO_THREADS + 7 + srv_n_purge_threads - 1: srv_worker_thread */
+static os_thread_id_t	thread_ids[SRV_MAX_N_IO_THREADS + 7
 				   + SRV_MAX_N_PURGE_THREADS];
 
 /** We use this mutex to test the return value of pthread_mutex_trylock
@@ -1842,6 +1852,7 @@ innobase_start_or_create_for_mysql(void)
 		+ 1 /* srv_error_monitor_thread */
 		+ 1 /* srv_monitor_thread */
 		+ 1 /* srv_master_thread */
+		+ 1 /* srv_redo_log_follow_thread */
 		+ 1 /* srv_purge_coordinator_thread */
 		+ 1 /* buf_dump_thread */
 		+ 1 /* dict_stats_thread */
@@ -2729,16 +2740,16 @@ innobase_start_or_create_for_mysql(void)
 		os_thread_create(
 			srv_purge_coordinator_thread,
-			NULL, thread_ids + 5 + SRV_MAX_N_IO_THREADS);
+			NULL, thread_ids + 6 + SRV_MAX_N_IO_THREADS);
 
 		ut_a(UT_ARR_SIZE(thread_ids)
-		     > 5 + srv_n_purge_threads + SRV_MAX_N_IO_THREADS);
+		     > 6 + srv_n_purge_threads + SRV_MAX_N_IO_THREADS);
 
 		/* We've already created the purge coordinator thread above. */
 		for (i = 1; i < srv_n_purge_threads; ++i) {
 			os_thread_create(
 				srv_worker_thread, NULL,
-				thread_ids + 5 + i + SRV_MAX_N_IO_THREADS);
+				thread_ids + 6 + i + SRV_MAX_N_IO_THREADS);
 		}
 
 		srv_start_wait_for_purge_to_start();
......
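The srv0start.cc hunks all follow from one change: srv_redo_log_follow_thread now occupies slot SRV_MAX_N_IO_THREADS + 5, pushing the purge coordinator to + 6 and the workers to + 7 onward, which is why every thread_ids offset and the ut_a() bound moved from 5 to 6. A compact model of that arithmetic with a compile-time version of the bound check; the constants mirror the commit's comment, and SRV_MAX_N_IO_THREADS's value here is illustrative.

#include <cstddef>

const std::size_t SRV_MAX_N_IO_THREADS    = 130;  // value is illustrative
const std::size_t SRV_MAX_N_PURGE_THREADS = 32;   // "32 is the maximum..."

// Slots following the I/O handler threads, per the new comment above.
enum ExtraSlot {
    SLOT_MASTER            = 1,  // srv_master_thread
    SLOT_LOCK_WAIT_TIMEOUT = 2,  // lock_wait_timeout_thread
    SLOT_ERROR_MONITOR     = 3,  // srv_error_monitor_thread
    SLOT_MONITOR           = 4,  // srv_monitor_thread
    SLOT_REDO_LOG_FOLLOW   = 5,  // srv_redo_log_follow_thread (the new one)
    SLOT_PURGE_COORDINATOR = 6,  // srv_purge_coordinator_thread
    SLOT_FIRST_WORKER      = 7   // srv_worker_thread, then one slot per worker
};

unsigned long thread_ids[SRV_MAX_N_IO_THREADS + 7 + SRV_MAX_N_PURGE_THREADS];

// Worker i (1-based; the coordinator occupies slot + 6) gets slot 6 + i,
// matching "thread_ids + 6 + i + SRV_MAX_N_IO_THREADS" in the hunk above.
unsigned long* worker_slot(std::size_t i) {
    return thread_ids + SRV_MAX_N_IO_THREADS + SLOT_PURGE_COORDINATOR + i;
}

static_assert(sizeof(thread_ids) / sizeof(thread_ids[0])
                  > 6 + SRV_MAX_N_PURGE_THREADS + SRV_MAX_N_IO_THREADS,
              "same bound the ut_a() in the commit checks at runtime");

int main() { return worker_slot(1) != nullptr ? 0 : 1; }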