Commit b56fce41 authored by Marko Mäkelä

SHOW ENGINE INNODB MUTEX and INFORMATION_SCHEMA.INNODB_MUTEXES

parent 6490e99e
@@ -364,7 +364,7 @@ SPACE NAME ENCRYPTION_SCHEME KEYSERVER_REQUESTS MIN_KEY_VERSION CURRENT_KEY_VERS
Warnings:
Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.innodb_tablespaces_encryption but the InnoDB storage engine is not installed
select * from information_schema.innodb_mutexes;
NAME CREATE_FILE CREATE_LINE OS_WAITS
NAME OS_WAITS
Warnings:
Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.innodb_mutexes but the InnoDB storage engine is not installed
select * from information_schema.innodb_sys_semaphore_waits;
@@ -2,7 +2,5 @@ SHOW CREATE TABLE INFORMATION_SCHEMA.INNODB_MUTEXES;
Table Create Table
INNODB_MUTEXES CREATE TEMPORARY TABLE `INNODB_MUTEXES` (
`NAME` varchar(4000) NOT NULL DEFAULT '',
`CREATE_FILE` varchar(4000) NOT NULL DEFAULT '',
`CREATE_LINE` int(11) unsigned NOT NULL DEFAULT 0,
`OS_WAITS` bigint(21) unsigned NOT NULL DEFAULT 0
) ENGINE=MEMORY DEFAULT CHARSET=utf8
@@ -15838,103 +15838,37 @@ innodb_show_mutex_status(
DBUG_RETURN(0);
}
/** Implements the SHOW MUTEX STATUS command.
@param[in,out] hton the innodb handlerton
@param[in,out] thd the MySQL query thread of the caller
@param[in,out] stat_print function for printing statistics
/** Implement SHOW ENGINE INNODB MUTEX for rw-locks.
@param hton the innodb handlerton
@param thd connection
@param fn function for printing statistics
@return 0 on success. */
static
int
innodb_show_rwlock_status(
handlerton*
#ifdef DBUG_ASSERT_EXISTS
hton
#endif
,
THD* thd,
stat_print_fn* stat_print)
innodb_show_rwlock_status(handlerton* ut_d(hton), THD *thd, stat_print_fn *fn)
{
DBUG_ENTER("innodb_show_rwlock_status");
DBUG_ASSERT(hton == innodb_hton_ptr);
#if 0 // FIXME
const block_lock* block_rwlock= nullptr;
ulint block_rwlock_oswait_count = 0;
uint hton_name_len = (uint) strlen(innobase_hton_name);
mutex_enter(&dict_sys.mutex);
for (const block_lock& rw_lock : rw_lock_list) {
if (rw_lock.count_os_wait == 0) {
continue;
}
int buf1len;
char buf1[IO_SIZE];
if (rw_lock.is_block_lock) {
block_rwlock = &rw_lock;
block_rwlock_oswait_count += rw_lock.count_os_wait;
continue;
}
buf1len = snprintf(
buf1, sizeof buf1, "rwlock: %s:%u",
innobase_basename(rw_lock.cfile_name),
rw_lock.cline);
int buf2len;
char buf2[IO_SIZE];
buf2len = snprintf(
buf2, sizeof buf2, "waits=%u",
rw_lock.count_os_wait);
if (stat_print(thd, innobase_hton_name,
hton_name_len,
buf1, static_cast<uint>(buf1len),
buf2, static_cast<uint>(buf2len))) {
mutex_exit(&dict_sys.mutex);
DBUG_RETURN(1);
}
}
if (block_rwlock != NULL) {
int buf1len;
char buf1[IO_SIZE];
DBUG_ENTER("innodb_show_rwlock_status");
ut_ad(hton == innodb_hton_ptr);
buf1len = snprintf(
buf1, sizeof buf1, "sum rwlock: %s:%u",
innobase_basename(block_rwlock->cfile_name),
block_rwlock->cline);
constexpr size_t prefix_len= sizeof "waits=" - 1;
char waits[prefix_len + 20 + 1];
snprintf(waits, sizeof waits, "waits=" UINT64PF, buf_pool.waited());
int buf2len;
char buf2[IO_SIZE];
if (fn(thd, STRING_WITH_LEN(innobase_hton_name),
STRING_WITH_LEN("buf_block_t::lock"), waits, strlen(waits)))
DBUG_RETURN(1);
buf2len = snprintf(
buf2, sizeof buf2, "waits=" ULINTPF,
block_rwlock_oswait_count);
if (stat_print(thd, innobase_hton_name,
hton_name_len,
buf1, static_cast<uint>(buf1len),
buf2, static_cast<uint>(buf2len))) {
mutex_exit(&dict_sys.mutex);
DBUG_RETURN(1);
}
}
mutex_exit(&dict_sys.mutex);
#endif
DBUG_RETURN(0);
DBUG_RETURN(!dict_sys.for_each_index([&](const dict_index_t &i)
{
uint32_t waited= i.lock.waited();
if (!waited)
return true;
snprintf(waits + prefix_len, sizeof waits - prefix_len, "%u", waited);
std::ostringstream s;
s << i.name << '(' << i.table->name << ')';
return !fn(thd, STRING_WITH_LEN(innobase_hton_name),
s.str().data(), s.str().size(), waits, strlen(waits));
}));
}
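
The new SHOW ENGINE INNODB MUTEX output is formatted into a single stack buffer: the "waits=" prefix is written once for the buf_block_t::lock total, and for each contended index only the digits after that prefix are overwritten before the line is handed to the stat_print_fn callback. A minimal standalone sketch of that buffer reuse (plain C++ with made-up counts, not InnoDB code):

#include <cinttypes>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main()
{
  constexpr size_t prefix_len= sizeof "waits=" - 1;
  char waits[prefix_len + 20 + 1];

  // First line: a made-up 64-bit total standing in for buf_pool.waited().
  std::snprintf(waits, sizeof waits, "waits=%" PRIu64, uint64_t{42});
  std::puts(waits);                                   // waits=42

  // Subsequent lines: only the digits after the fixed prefix are rewritten.
  const uint32_t per_index_waits[]= {7, 123};
  for (uint32_t waited : per_index_waits)
  {
    std::snprintf(waits + prefix_len, sizeof waits - prefix_len, "%u", waited);
    std::puts(waits);                                 // waits=7, then waits=123
  }
  return 0;
}
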
/** Implements the SHOW MUTEX STATUS command.
@@ -6885,26 +6885,15 @@ namespace Show {
/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_MUTEXES */
static ST_FIELD_INFO innodb_mutexes_fields_info[] =
{
#define MUTEXES_NAME 0
Column("NAME", Varchar(OS_FILE_MAX_PATH), NOT_NULL),
#define MUTEXES_CREATE_FILE 1
Column("CREATE_FILE", Varchar(OS_FILE_MAX_PATH), NOT_NULL),
#define MUTEXES_CREATE_LINE 2
Column("CREATE_LINE", ULong(), NOT_NULL),
#define MUTEXES_OS_WAITS 3
Column("OS_WAITS", ULonglong(), NOT_NULL),
CEnd()
};
} // namespace Show
/*******************************************************************//**
Function to populate INFORMATION_SCHEMA.INNODB_MUTEXES table.
Loop through each record in mutex and rw_lock lists, and extract the column
information and fill the INFORMATION_SCHEMA.INNODB_MUTEXES table.
@see innodb_show_rwlock_status
@return 0 on success */
static
int
@@ -6914,77 +6903,34 @@ i_s_innodb_mutexes_fill_table(
TABLE_LIST* tables, /*!< in/out: tables to fill */
Item* ) /*!< in: condition (not used) */
{
DBUG_ENTER("i_s_innodb_mutexes_fill_table");
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
/* deny access to user without PROCESS_ACL privilege */
if (check_global_access(thd, PROCESS_ACL)) {
DBUG_RETURN(0);
}
#if 0 // FIXME
ulint block_lock_oswait_count = 0;
const block_lock* block_lock= nullptr;
Field** fields = tables->table->field;
struct Locking
{
Locking() { mutex_enter(&dict_sys.mutex); }
~Locking() { mutex_exit(&dict_sys.mutex); }
} locking;
char lock_name[sizeof "buf0dump.cc:12345"];
for (const block_lock& lock : rw_lock_list) {
if (lock.count_os_wait == 0) {
continue;
}
DBUG_ENTER("i_s_innodb_mutexes_fill_table");
RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name.str);
if (buf_pool.is_block_lock(&lock)) {
block_lock = &lock;
block_lock_oswait_count += lock.count_os_wait;
continue;
}
if (check_global_access(thd, PROCESS_ACL))
DBUG_RETURN(0);
const char* basename = innobase_basename(
lock.cfile_name);
snprintf(lock_name, sizeof lock_name, "%s:%u",
basename, lock.cline);
OK(field_store_string(fields[MUTEXES_NAME],
lock_name));
OK(field_store_string(fields[MUTEXES_CREATE_FILE],
basename));
OK(fields[MUTEXES_CREATE_LINE]->store(lock.cline,
true));
fields[MUTEXES_CREATE_LINE]->set_notnull();
OK(fields[MUTEXES_OS_WAITS]->store(lock.count_os_wait,
true));
fields[MUTEXES_OS_WAITS]->set_notnull();
OK(schema_table_store_record(thd, tables->table));
}
Field **fields= tables->table->field;
OK(fields[0]->store(STRING_WITH_LEN("buf_block_t::lock"),
system_charset_info));
OK(fields[1]->store(buf_pool.waited(), true));
fields[0]->set_notnull();
fields[1]->set_notnull();
if (block_lock) {
char buf1[IO_SIZE];
snprintf(buf1, sizeof buf1, "combined %s",
innobase_basename(block_lock->cfile_name));
OK(field_store_string(fields[MUTEXES_NAME],
"buf_block_t::lock"));
OK(field_store_string(fields[MUTEXES_CREATE_FILE],
buf1));
OK(fields[MUTEXES_CREATE_LINE]->store(block_lock->cline,
true));
fields[MUTEXES_CREATE_LINE]->set_notnull();
OK(fields[MUTEXES_OS_WAITS]->store(
block_lock_oswait_count, true));
fields[MUTEXES_OS_WAITS]->set_notnull();
OK(schema_table_store_record(thd, tables->table));
}
#endif
OK(schema_table_store_record(thd, tables->table));
DBUG_RETURN(0);
DBUG_RETURN(!dict_sys.for_each_index([&](const dict_index_t &i)
{
uint32_t waited= i.lock.waited();
if (!waited)
return true;
if (fields[1]->store(waited, true))
return false;
std::ostringstream s;
s << i.name << '(' << i.table->name << ')';
return !fields[0]->store(s.str().data(), s.str().size(),
system_charset_info) &&
!schema_table_store_record(thd, tables->table);
}));
}
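
The fill function relies on the iteration convention documented for dict_sys_t::for_each_index() further down in this diff: the callback returns false to stop the scan, and for_each_index() returns true only if the callback never returned false, which the caller negates into the 0/1 result expected from an INFORMATION_SCHEMA fill function. A generic sketch of that convention (hypothetical names, standard C++ only, no InnoDB internals):

#include <cstdio>
#include <vector>

template<typename F>
bool for_each(const std::vector<int> &v, const F &f)
{
  for (int i : v)
    if (!f(i))
      return false;  // the callback asked to stop: report "stopped early"
  return true;       // the callback never returned false
}

int main()
{
  const std::vector<int> waits{0, 3, 0, 5};
  // Emit one line per non-zero element; a false return would abort the scan.
  bool completed= for_each(waits, [](int w)
  {
    if (!w)
      return true;                              // skip, keep iterating
    return std::printf("waits=%d\n", w) > 0;    // false only on output error
  });
  return completed ? 0 : 1;  // mirrors DBUG_RETURN(!dict_sys.for_each_index(...))
}
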
/*******************************************************************//**
@@ -1326,6 +1326,16 @@ class buf_pool_t
@return whether the allocation succeeded */
inline bool create(size_t bytes);
/** @return sum of buf_block_t::lock::waited() */
uint64_t waited() const
{
uint64_t total_waited= 0;
for (const buf_block_t *block= blocks, * const end= blocks + size;
block != end; block++)
total_waited+= block->lock.waited();
return total_waited;
}
#ifdef UNIV_DEBUG
/** Find a block that points to a ROW_FORMAT=COMPRESSED page
@param data pointer to the start of a ROW_FORMAT=COMPRESSED page frame
@@ -1403,6 +1413,19 @@ class buf_pool_t
return size;
}
/** @return sum of buf_block_t::lock::waited() */
uint64_t waited()
{
ut_ad(is_initialised());
uint64_t waited_count= 0;
page_hash.read_lock_all(); /* prevent any race with resize() */
for (const chunk_t *chunk= chunks, * const end= chunks + n_chunks;
chunk != end; chunk++)
waited_count+= chunk->waited();
page_hash.read_unlock_all();
return waited_count;
}
/** Determine whether a frame is intended to be withdrawn during resize().
@param ptr pointer within a buf_block_t::frame
@return whether the frame will be withdrawn */
@@ -1410,7 +1433,7 @@ class buf_pool_t
{
ut_ad(curr_size < old_size);
#ifdef SAFE_MUTEX
if (resizing.load(std::memory_order_relaxed))
if (resize_in_progress())
mysql_mutex_assert_owner(&mutex);
#endif /* SAFE_MUTEX */
@@ -1430,7 +1453,7 @@ class buf_pool_t
{
ut_ad(curr_size < old_size);
#ifdef SAFE_MUTEX
if (resizing.load(std::memory_order_relaxed))
if (resize_in_progress())
mysql_mutex_assert_owner(&mutex);
#endif /* SAFE_MUTEX */
@@ -1804,6 +1827,28 @@ class buf_pool_t
}
}
/** Acquire all latches in shared mode */
void read_lock_all()
{
for (auto n= pad(n_cells) & ~ELEMENTS_PER_LATCH;;
n-= ELEMENTS_PER_LATCH + 1)
{
reinterpret_cast<page_hash_latch&>(array[n]).read_lock();
if (!n)
break;
}
}
/** Release all latches in shared mode */
void read_unlock_all()
{
for (auto n= pad(n_cells) & ~ELEMENTS_PER_LATCH;;
n-= ELEMENTS_PER_LATCH + 1)
{
reinterpret_cast<page_hash_latch&>(array[n]).read_unlock();
if (!n)
break;
}
}
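
Both loops above walk the interleaved latch slots of the padded cell array. A standalone illustration of that walk, under the assumption (hypothetical constants, not the real values from buf0buf.h) that one latch slot precedes every ELEMENTS_PER_LATCH element slots, so latches sit at indices 0, ELEMENTS_PER_LATCH + 1, 2 * (ELEMENTS_PER_LATCH + 1), and so on:

#include <cassert>
#include <cstdio>

int main()
{
  constexpr unsigned ELEMENTS_PER_LATCH= 7;   // assumed: one less than a power of 2
  constexpr unsigned padded_cells= 42;        // stand-in for pad(n_cells)

  // Same loop shape as read_lock_all()/read_unlock_all(): round down to the
  // last latch slot, then step back one latch (ELEMENTS_PER_LATCH + 1 slots)
  // at a time until slot 0 is reached.
  for (unsigned n= padded_cells & ~ELEMENTS_PER_LATCH;;
       n-= ELEMENTS_PER_LATCH + 1)
  {
    assert(n % (ELEMENTS_PER_LATCH + 1) == 0);  // every stop is a latch slot
    std::printf("latch slot %u\n", n);          // prints 40, 32, 24, 16, 8, 0
    if (!n)
      break;
  }
  return 0;
}

Under that assumption, the initial & ~ELEMENTS_PER_LATCH rounds the padded cell count down to the last latch slot, and every decrement by ELEMENTS_PER_LATCH + 1 lands exactly on the previous one.
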
/** Exclusively acquire all latches */
inline void write_lock_all();
@@ -1537,7 +1537,8 @@ class dict_sys_t
@param t table
@retval false if f returned false
@retval true if f never returned false */
template<typename F> inline bool for_each_index(const F &f, dict_table_t *t);
template<typename F>
inline bool for_each_index(const F &f, const dict_table_t *t);
public:
/** Invoke f on each index of each persistent table, until it returns false
@retval false if f returned false
@@ -1611,9 +1612,9 @@ extern dict_sys_t dict_sys;
#define dict_sys_unlock() dict_sys.unlock()
template<typename F>
inline bool dict_sys_t::for_each_index(const F &f, dict_table_t *t)
inline bool dict_sys_t::for_each_index(const F &f, const dict_table_t *t)
{
dict_index_t *i= UT_LIST_GET_FIRST(t->indexes);
const dict_index_t *i= UT_LIST_GET_FIRST(t->indexes);
do
{
if (!i->is_corrupted() && !f(*i))
@@ -78,13 +78,39 @@ class srw_lock_low final : private rw_lock
#endif
bool rd_lock_try() { uint32_t l; return read_trylock(l); }
bool wr_lock_try() { return write_trylock(); }
template<bool update=false>
void rd_lock() { uint32_t l; if (!read_trylock(l)) read_lock(l); }
void u_lock() { uint32_t l; if (!update_trylock(l)) update_lock(l); }
template<bool update= false>
bool rd_lock()
{
uint32_t l;
if (read_trylock(l))
return true;
read_lock(l);
return false;
}
bool u_lock()
{
uint32_t l;
if (update_trylock(l))
return true;
update_lock(l);
return false;
}
bool u_lock_try() { uint32_t l; return update_trylock(l); }
void u_wr_upgrade() { if (!upgrade_trylock()) write_lock(true); }
template<bool update=false>
void wr_lock() { if (!write_trylock()) write_lock(false); }
bool u_wr_upgrade()
{
if (upgrade_trylock())
return true;
write_lock(true);
return false;
}
template<bool update= false>
bool wr_lock()
{
if (write_trylock())
return true;
write_lock(false);
return false;
}
void rd_unlock();
void u_unlock();
void wr_unlock();
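
The acquisition methods of srw_lock_low (and of the srw_lock wrapper below) now return a bool: true when the try-lock fast path succeeded, false when the slow path had to block. The callers in sux_lock use that return value to maintain the new waits counter. A toy sketch of the same convention built on a plain std::mutex (nothing here is InnoDB code; std::mutex merely stands in for the real lock):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <mutex>

struct counting_lock
{
  std::mutex m;
  std::atomic<uint32_t> waits{0};

  void lock()
  {
    // A successful try_lock() plays the role of the new "true" return value:
    // the acquisition did not block, so nothing is counted.
    if (!m.try_lock())
    {
      m.lock();                                       // slow path: we blocked
      waits.fetch_add(1, std::memory_order_relaxed);
    }
  }
  void unlock() { m.unlock(); }
  uint32_t waited() const { return waits.load(std::memory_order_relaxed); }
};

int main()
{
  counting_lock l;
  l.lock();
  l.unlock();
  std::printf("waits=%u\n", l.waited());              // 0: never contended
  return 0;
}

Relaxed ordering is enough for such a counter, which appears to be why the sux_lock changes use std::memory_order_relaxed: the value is only ever read for reporting, never for synchronization.
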
@@ -122,7 +148,7 @@ class srw_lock
lock.destroy();
}
template<bool update= false>
void rd_lock(const char *file, unsigned line)
bool rd_lock(const char *file, unsigned line)
{
if (pfs_psi)
{
@@ -130,12 +156,12 @@ class srw_lock
PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)
(&state, pfs_psi, update ? PSI_RWLOCK_SHAREDLOCK : PSI_RWLOCK_READLOCK,
file, line);
lock.rd_lock();
bool no_wait= lock.rd_lock();
if (locker)
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
return;
return no_wait;
}
lock.rd_lock();
return lock.rd_lock();
}
void rd_unlock()
{
@@ -143,19 +169,19 @@ class srw_lock
PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi);
lock.rd_unlock();
}
void u_lock(const char *file, unsigned line)
bool u_lock(const char *file, unsigned line)
{
if (pfs_psi)
{
PSI_rwlock_locker_state state;
PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
(&state, pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK, file, line);
lock.u_lock();
bool no_wait= lock.u_lock();
if (locker)
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
return;
return no_wait;
}
lock.u_lock();
return lock.u_lock();
}
void u_unlock()
{
@@ -164,7 +190,7 @@ class srw_lock
lock.u_unlock();
}
template<bool update= false>
void wr_lock(const char *file, unsigned line)
bool wr_lock(const char *file, unsigned line)
{
if (pfs_psi)
{
@@ -173,12 +199,12 @@ class srw_lock
(&state, pfs_psi,
update ? PSI_RWLOCK_EXCLUSIVELOCK : PSI_RWLOCK_WRITELOCK,
file, line);
lock.wr_lock();
bool no_wait= lock.wr_lock();
if (locker)
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
return;
return no_wait;
}
lock.wr_lock();
return lock.wr_lock();
}
void wr_unlock()
{
@@ -186,19 +212,19 @@ class srw_lock
PSI_RWLOCK_CALL(unlock_rwlock)(pfs_psi);
lock.wr_unlock();
}
void u_wr_upgrade(const char *file, unsigned line)
bool u_wr_upgrade(const char *file, unsigned line)
{
if (pfs_psi)
{
PSI_rwlock_locker_state state;
PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
(&state, pfs_psi, PSI_RWLOCK_EXCLUSIVELOCK, file, line);
lock.u_wr_upgrade();
bool no_wait= lock.u_wr_upgrade();
if (locker)
PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
return;
return no_wait;
}
lock.u_wr_upgrade();
return lock.u_wr_upgrade();
}
bool rd_lock_try() { return lock.rd_lock_try(); }
bool u_lock_try() { return lock.u_lock_try(); }
@@ -43,6 +43,8 @@ class sux_lock final
#endif
/** Numbers of U and X locks. Protected by lock. */
uint32_t recursive;
/** Number of blocking waits */
std::atomic<uint32_t> waits;
#ifdef UNIV_DEBUG
/** Protects readers */
mutable srw_mutex readers_lock;
@@ -67,6 +69,7 @@ class sux_lock final
lock.SRW_LOCK_INIT(key);
ut_ad(!writer.load(std::memory_order_relaxed));
ut_ad(!recursive);
ut_ad(!waits.load(std::memory_order_relaxed));
ut_d(readers_lock.init());
ut_ad(!readers.load(std::memory_order_relaxed));
}
@@ -88,8 +91,11 @@ class sux_lock final
lock.destroy();
}
/** @return number of blocking waits */
uint32_t waited() const { return waits.load(std::memory_order_relaxed); }
/** needed for dict_index_t::clone() */
void operator=(const sux_lock&) {}
inline void operator=(const sux_lock&);
#ifdef UNIV_DEBUG
/** @return whether no recursive locks are being held */
@@ -281,6 +287,9 @@ class sux_lock final
};
typedef sux_lock<srw_lock_low> block_lock;
/** needed for dict_index_t::clone() */
template<> inline void sux_lock<srw_lock>::operator=(const sux_lock&) {}
#ifndef UNIV_PFS_RWLOCK
typedef block_lock index_lock;
#else
@@ -291,6 +300,7 @@ template<> inline void sux_lock<srw_lock_low>::init()
lock.init();
ut_ad(!writer.load(std::memory_order_relaxed));
ut_ad(!recursive);
ut_ad(!waits.load(std::memory_order_relaxed));
ut_d(readers_lock.init());
ut_ad(!readers.load(std::memory_order_relaxed));
}
@@ -300,7 +310,8 @@ inline void sux_lock<srw_lock>::s_lock(const char *file, unsigned line)
{
ut_ad(!have_x());
ut_ad(!have_s());
lock.template rd_lock<true>(file, line);
if (!lock.template rd_lock<true>(file, line))
waits.fetch_add(1, std::memory_order_relaxed);
ut_d(s_lock_register());
}
@@ -312,7 +323,8 @@ inline void sux_lock<srw_lock>::u_lock(const char *file, unsigned line)
writer_recurse<true>();
else
{
lock.u_lock(file, line);
if (!lock.u_lock(file, line))
waits.fetch_add(1, std::memory_order_relaxed);
ut_ad(!recursive);
recursive= RECURSIVE_U;
set_first_owner(id);
@@ -327,7 +339,8 @@ inline void sux_lock<srw_lock>::x_lock(const char *file, unsigned line)
writer_recurse<false>();
else
{
lock.template wr_lock<true>(file, line);
if (!lock.template wr_lock<true>(file, line))
waits.fetch_add(1, std::memory_order_relaxed);
ut_ad(!recursive);
recursive= RECURSIVE_X;
set_first_owner(id);
@@ -338,7 +351,8 @@ template<>
inline void sux_lock<srw_lock>::u_x_upgrade(const char *file, unsigned line)
{
ut_ad(have_u_not_x());
lock.u_wr_upgrade(file, line);
if (!lock.u_wr_upgrade(file, line))
waits.fetch_add(1, std::memory_order_relaxed);
recursive/= RECURSIVE_U;
}
#endif
@@ -348,11 +362,11 @@ inline void sux_lock<srw_lock_low>::s_lock()
{
ut_ad(!have_x());
ut_ad(!have_s());
lock.template rd_lock<true>();
if (!lock.template rd_lock<true>())
waits.fetch_add(1, std::memory_order_relaxed);
ut_d(s_lock_register());
}
template<>
inline void sux_lock<srw_lock_low>::u_lock()
{
@@ -361,7 +375,8 @@ inline void sux_lock<srw_lock_low>::u_lock()
writer_recurse<true>();
else
{
lock.u_lock();
if (!lock.u_lock())
waits.fetch_add(1, std::memory_order_relaxed);
ut_ad(!recursive);
recursive= RECURSIVE_U;
set_first_owner(id);
@@ -379,7 +394,8 @@ inline void sux_lock<srw_lock_low>::x_lock(bool for_io)
}
else
{
lock.template wr_lock<true>();
if (!lock.template wr_lock<true>())
waits.fetch_add(1, std::memory_order_relaxed);
ut_ad(!recursive);
recursive= RECURSIVE_X;
set_first_owner(for_io ? FOR_IO : id);
@@ -390,7 +406,8 @@ template<>
inline void sux_lock<srw_lock_low>::u_x_upgrade()
{
ut_ad(have_u_not_x());
lock.u_wr_upgrade();
if (!lock.u_wr_upgrade())
waits.fetch_add(1, std::memory_order_relaxed);
recursive/= RECURSIVE_U;
}