Commit 83212632 authored by Krunal Bauskar

MDEV-27935: Enable performance_schema profiling for trx_rseg_t latch

- In 10.6, the trx_rseg_t mutex was ported to use a latch. As part of this
  porting, profiling of the latch was removed. This patch re-enables it, given
  that the said latch continues to occupy the top slots in the contention list.
parent 164a6aa4
......@@ -12,6 +12,7 @@ wait/synch/rwlock/innodb/fil_space_latch
wait/synch/rwlock/innodb/lock_latch
wait/synch/rwlock/innodb/trx_i_s_cache_lock
wait/synch/rwlock/innodb/trx_purge_latch
wait/synch/rwlock/innodb/trx_rseg_latch
TRUNCATE TABLE performance_schema.events_waits_history_long;
TRUNCATE TABLE performance_schema.events_waits_history;
TRUNCATE TABLE performance_schema.events_waits_current;
......@@ -36,6 +37,7 @@ WHERE event_name LIKE 'wait/synch/rwlock/innodb/%'
AND event_name NOT IN
('wait/synch/rwlock/innodb/btr_search_latch',
'wait/synch/rwlock/innodb/dict_operation_lock',
'wait/synch/rwlock/innodb/trx_rseg_latch',
'wait/synch/rwlock/innodb/trx_purge_latch')
ORDER BY event_name;
event_name
......
......@@ -49,6 +49,7 @@ WHERE event_name LIKE 'wait/synch/rwlock/innodb/%'
AND event_name NOT IN
('wait/synch/rwlock/innodb/btr_search_latch',
'wait/synch/rwlock/innodb/dict_operation_lock',
'wait/synch/rwlock/innodb/trx_rseg_latch',
'wait/synch/rwlock/innodb/trx_purge_latch')
ORDER BY event_name;
......
......@@ -595,6 +595,7 @@ mysql_pfs_key_t fil_space_latch_key;
mysql_pfs_key_t trx_i_s_cache_lock_key;
mysql_pfs_key_t trx_purge_latch_key;
mysql_pfs_key_t lock_latch_key;
mysql_pfs_key_t trx_rseg_latch_key;
/* all_innodb_rwlocks array contains rwlocks that are
performance schema instrumented if "UNIV_PFS_RWLOCK"
......@@ -609,6 +610,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] =
{ &trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0 },
{ &trx_purge_latch_key, "trx_purge_latch", 0 },
{ &lock_latch_key, "lock_latch", 0 },
{ &trx_rseg_latch_key, "trx_rseg_latch", 0 },
{ &index_tree_rw_lock_key, "index_tree_rw_lock", PSI_RWLOCK_FLAG_SX }
};
# endif /* UNIV_PFS_RWLOCK */
......
......@@ -505,6 +505,8 @@ class srw_lock_impl
}
bool rd_lock_try() { return lock.rd_lock_try(); }
bool wr_lock_try() { return lock.wr_lock_try(); }
void lock_shared() { return rd_lock(SRW_LOCK_CALL); }
void unlock_shared() { return rd_unlock(); }
#ifndef SUX_LOCK_GENERIC
/** @return whether any lock may be held by any thread */
bool is_locked_or_waiting() const noexcept
......
......@@ -86,7 +86,7 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t
/** tablespace containing the rollback segment; constant after init() */
fil_space_t *space;
/** latch protecting everything except page_no, space */
srw_spin_lock_low latch;
srw_spin_lock latch;
/** rollback segment header page number; constant after init() */
uint32_t page_no;
/** length of the TRX_RSEG_HISTORY list (number of transactions) */
......
......@@ -577,5 +577,6 @@ extern mysql_pfs_key_t index_tree_rw_lock_key;
extern mysql_pfs_key_t index_online_log_key;
extern mysql_pfs_key_t trx_sys_rw_lock_key;
extern mysql_pfs_key_t lock_latch_key;
extern mysql_pfs_key_t trx_rseg_latch_key;
# endif /* UNIV_PFS_RWLOCK */
#endif /* HAVE_PSI_INTERFACE */
......@@ -121,7 +121,7 @@ TRANSACTIONAL_INLINE inline bool TrxUndoRsegsIterator::set_next()
#ifdef SUX_LOCK_GENERIC
purge_sys.rseg->latch.rd_lock();
#else
transactional_shared_lock_guard<srw_spin_lock_low> rg
transactional_shared_lock_guard<srw_spin_lock> rg
{purge_sys.rseg->latch};
#endif
last_trx_no = purge_sys.rseg->last_trx_no();
......@@ -367,7 +367,7 @@ static void trx_purge_free_segment(trx_rseg_t *rseg, fil_addr_t hdr_addr)
/* We only need the latch to maintain rseg->curr_size. To follow the
latching order, we must acquire it before acquiring any related
page latch. */
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
buf_block_t* block = trx_undo_page_get(hdr_page_id, &mtr);
......@@ -387,7 +387,7 @@ static void trx_purge_free_segment(trx_rseg_t *rseg, fil_addr_t hdr_addr)
rseg->latch.wr_unlock();
mtr.commit();
mtr.start();
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
rseg_hdr = trx_rsegf_get(rseg->space, rseg->page_no, &mtr);
......@@ -449,7 +449,7 @@ trx_purge_truncate_rseg_history(
mtr.start();
ut_ad(rseg.is_persistent());
rseg.latch.wr_lock();
rseg.latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* rseg_hdr = trx_rsegf_get(rseg.space, rseg.page_no, &mtr);
......@@ -511,7 +511,7 @@ trx_purge_truncate_rseg_history(
}
mtr.start();
rseg.latch.wr_lock();
rseg.latch.wr_lock(SRW_LOCK_CALL);
rseg_hdr = trx_rsegf_get(rseg.space, rseg.page_no, &mtr);
......@@ -638,7 +638,7 @@ TRANSACTIONAL_TARGET static void trx_purge_truncate_history()
#ifdef SUX_LOCK_GENERIC
rseg.latch.rd_lock();
#else
transactional_shared_lock_guard<srw_spin_lock_low> g{rseg.latch};
transactional_shared_lock_guard<srw_spin_lock> g{rseg.latch};
#endif
ut_ad(rseg.skip_allocation());
if (rseg.is_referenced())
......@@ -849,7 +849,7 @@ static void trx_purge_rseg_get_next_history_log(
mtr.start();
purge_sys.rseg->latch.wr_lock();
purge_sys.rseg->latch.wr_lock(SRW_LOCK_CALL);
ut_a(purge_sys.rseg->last_page_no != FIL_NULL);
......@@ -901,7 +901,7 @@ static void trx_purge_rseg_get_next_history_log(
mtr.commit();
purge_sys.rseg->latch.wr_lock();
purge_sys.rseg->latch.wr_lock(SRW_LOCK_CALL);
purge_sys.rseg->last_page_no = prev_log_addr.page;
purge_sys.rseg->set_last_commit(prev_log_addr.boffset, trx_no);
......
......@@ -2129,7 +2129,7 @@ trx_undo_report_row_operation(
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
trx_undo_free_last_page(undo, &mtr);
rseg->latch.wr_unlock();
......
......@@ -380,7 +380,7 @@ void trx_rseg_t::destroy()
void trx_rseg_t::init(fil_space_t *space, uint32_t page)
{
latch.init();
latch.SRW_LOCK_INIT(trx_rseg_latch_key);
ut_ad(!this->space);
this->space= space;
page_no= page;
......
......@@ -212,7 +212,7 @@ uint32_t trx_sys_t::history_size()
uint32_t size= 0;
for (auto &rseg : rseg_array)
{
rseg.latch.rd_lock();
rseg.latch.rd_lock(SRW_LOCK_CALL);
size+= rseg.history_size;
}
for (auto &rseg : rseg_array)
......@@ -228,7 +228,7 @@ bool trx_sys_t::history_exceeds(uint32_t threshold)
size_t i;
for (i= 0; i < array_elements(rseg_array); i++)
{
rseg_array[i].latch.rd_lock();
rseg_array[i].latch.rd_lock(SRW_LOCK_CALL);
size+= rseg_array[i].history_size;
if (size > threshold)
{
......
......@@ -1039,7 +1039,7 @@ trx_write_serialisation_history(
ut_ad(!trx->read_only);
ut_ad(!undo || undo->rseg == rseg);
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
/* Assign the transaction serialisation number and add any
undo log to the purge queue. */
......
......@@ -558,7 +558,7 @@ buf_block_t* trx_undo_add_page(trx_undo_t* undo, mtr_t* mtr)
a pessimistic insert in a B-tree, and we must reserve the
counterpart of the tree latch, which is the rseg mutex. */
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* header_block = trx_undo_page_get(
page_id_t(undo->rseg->space->id, undo->hdr_page_no), mtr);
......@@ -679,7 +679,7 @@ void trx_undo_truncate_end(trx_undo_t& undo, undo_no_t limit, bool is_temp)
}
trx_undo_rec_t* trunc_here = NULL;
undo.rseg->latch.wr_lock();
undo.rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* undo_block = trx_undo_page_get(
page_id_t(undo.rseg->space->id, undo.last_page_no),
&mtr);
......@@ -1160,7 +1160,7 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
trx_rseg_t* rseg = trx->rsegs.m_redo.rseg;
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* block = trx_undo_reuse_cached(
trx, rseg, &trx->rsegs.m_redo.undo, mtr);
......@@ -1216,7 +1216,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
*err = DB_TOO_MANY_CONCURRENT_TRXS; return NULL;
);
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
buf_block_t* block = trx_undo_reuse_cached(trx, rseg, undo, mtr);
......@@ -1310,7 +1310,7 @@ void trx_undo_commit_cleanup(trx_undo_t *undo)
trx_rseg_t* rseg = undo->rseg;
ut_ad(rseg->space == fil_system.temp_space);
rseg->latch.wr_lock();
rseg->latch.wr_lock(SRW_LOCK_CALL);
UT_LIST_REMOVE(rseg->undo_list, undo);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment