Commit 63dd2a97 authored by Marko Mäkelä

MDEV-24167: Replace trx_i_s_cache_lock

parent c561f9e6
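
For orientation before the hunks: the commit retires the rw_lock_t based trx_i_s_cache_lock, together with its sync-debug instrumentation, and switches the INFORMATION_SCHEMA transaction cache to the lighter srw_lock class. The mapping below is read directly off the diff (a summary, not code from the tree):

    // rw_lock_t API (removed)                    srw_lock API (added)
    // rw_lock_create(key, &l, SYNC_...);     ->  l.init(key);
    // rw_lock_free(&l);                      ->  l.destroy();
    // rw_lock_s_lock(&l);                    ->  l.rd_lock();
    // rw_lock_s_unlock(&l);                  ->  l.rd_unlock();
    // rw_lock_x_lock(&l);                    ->  l.wr_lock();
    // rw_lock_x_unlock(&l);                  ->  l.wr_unlock();
    // ut_ad(rw_lock_own(&l, ...));           ->  dropped: srw_lock does not track its owner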
@@ -12,7 +12,6 @@ wait/synch/sxlock/innodb/fil_space_latch
wait/synch/sxlock/innodb/fts_cache_init_rw_lock
wait/synch/sxlock/innodb/fts_cache_rw_lock
wait/synch/sxlock/innodb/index_tree_rw_lock
-wait/synch/sxlock/innodb/trx_i_s_cache_lock
wait/synch/sxlock/innodb/trx_purge_latch
select name from performance_schema.rwlock_instances
where name in
@@ -26,7 +25,6 @@ where name in
order by name;
name
wait/synch/sxlock/innodb/dict_operation_lock
-wait/synch/sxlock/innodb/trx_i_s_cache_lock
wait/synch/sxlock/innodb/trx_purge_latch
drop table if exists t1;
create table t1(a int) engine=innodb;
......
@@ -568,7 +568,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = {
PSI_RWLOCK_KEY(fil_space_latch),
PSI_RWLOCK_KEY(fts_cache_rw_lock),
PSI_RWLOCK_KEY(fts_cache_init_rw_lock),
-PSI_RWLOCK_KEY(trx_i_s_cache_lock),
+{ &trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0 },
PSI_RWLOCK_KEY(trx_purge_latch),
PSI_RWLOCK_KEY(index_tree_rw_lock),
};
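
The handwritten initializer is, plausibly, just the manual expansion of the macro it replaces; PSI_RWLOCK_KEY in ha_innodb.cc is conventionally a token-pasting helper along these lines (an assumption, not copied from the tree):

    // Assumed helper shape:
    // #define PSI_RWLOCK_KEY(n) {&n##_key, #n, 0}
    // With it, PSI_RWLOCK_KEY(trx_i_s_cache_lock) would expand to exactly
    // { &trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0 }

Spelling the entry out keeps the PERFORMANCE_SCHEMA name stable while the latch itself no longer goes through the rw_lock_t registration machinery.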
......
@@ -239,8 +239,6 @@ enum latch_level_t {
SYNC_DICT_OPERATION,
-SYNC_TRX_I_S_RWLOCK,
/** Level is varying. Only used with buffer pool page locks, which
do not have a fixed level, but instead have their level set after
the page is locked; see e.g. ibuf_bitmap_get_map_page(). */
@@ -298,7 +296,6 @@ enum latch_id_t {
LATCH_ID_FIL_SPACE,
LATCH_ID_FTS_CACHE,
LATCH_ID_FTS_CACHE_INIT,
-LATCH_ID_TRX_I_S_CACHE,
LATCH_ID_TRX_PURGE,
LATCH_ID_IBUF_INDEX_TREE,
LATCH_ID_INDEX_TREE,
......
@@ -495,7 +495,6 @@ LatchDebug::LatchDebug()
LEVEL_MAP_INSERT(SYNC_DICT);
LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
-LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);
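
LEVEL_MAP_INSERT presumably records each latch_level_t value together with its stringified name for debug reporting, e.g. (a hypothetical reconstruction):

    // Hypothetical shape of the helper:
    // #define LEVEL_MAP_INSERT(T) m_levels.insert(Levels::value_type(T, #T))

Once SYNC_TRX_I_S_RWLOCK is removed from latch_level_t, this entry has to go as well, or the file would no longer compile.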
@@ -743,7 +742,6 @@ LatchDebug::check_order(
case SYNC_PURGE_QUEUE:
case SYNC_DICT_OPERATION:
case SYNC_DICT_HEADER:
-case SYNC_TRX_I_S_RWLOCK:
case SYNC_IBUF_MUTEX:
case SYNC_INDEX_ONLINE_LOG:
case SYNC_STATS_AUTO_RECALC:
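
check_order() enforces InnoDB's global latching order: roughly, a latch may only be requested at a level below every latch the thread already holds, which rules out deadlock cycles by construction. A toy standalone checker conveying the idea (illustration only, not InnoDB's implementation, which also has per-level special cases):

    #include <cassert>
    #include <vector>

    // Toy latch-order checker: acquisitions must be in strictly
    // descending level order; a total order on latches makes a
    // circular wait impossible.
    struct order_checker {
      std::vector<int> held;                 // levels currently held
      void lock(int level) {
        for (int h : held)
          assert(level < h);                 // would-be order violation
        held.push_back(level);
      }
      void unlock(int level) {
        for (auto it = held.begin(); it != held.end(); ++it)
          if (*it == level) { held.erase(it); return; }
        assert(!"unlocking a latch that is not held");
      }
    };

With SYNC_TRX_I_S_RWLOCK gone from the enum, its case label in this switch would not compile, hence the removal here.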
@@ -1320,9 +1318,6 @@ sync_latch_meta_init()
LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
fts_cache_init_rw_lock_key);
-LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
-trx_i_s_cache_lock_key);
LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);
LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
......
@@ -139,8 +139,7 @@ struct i_s_table_cache_t {
/** This structure describes the intermediate buffer */
struct trx_i_s_cache_t {
-rw_lock_t rw_lock; /*!< read-write lock protecting
-the rest of this structure */
+srw_lock rw_lock; /*!< read-write lock protecting this */
Atomic_relaxed<ulonglong> last_read;
/*!< last time the cache was read;
measured in nanoseconds */
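
srw_lock exposes member functions where rw_lock_t used free functions. A stand-in with the same surface, faked on top of std::shared_mutex purely for illustration (the real class wraps lighter native primitives and optional PERFORMANCE_SCHEMA instrumentation):

    #include <shared_mutex>

    // Illustration only: same method names as the hunks below use,
    // but backed by std::shared_mutex rather than InnoDB's own code.
    class srw_lock_stand_in {
      std::shared_mutex m;
    public:
      void init(unsigned /*pfs_key*/) {}  // the real init() registers a PFS key
      void destroy() {}
      void rd_lock()   { m.lock_shared(); }    // shared (read) latch
      void rd_unlock() { m.unlock_shared(); }
      void wr_lock()   { m.lock(); }           // exclusive (write) latch
      void wr_unlock() { m.unlock(); }
    };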
@@ -1138,9 +1137,6 @@ static bool can_cache_be_updated(trx_i_s_cache_t* cache)
we are currently holding an exclusive rw lock on the cache.
So it is not possible for last_read to be updated while we are
reading it. */
-ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_X));
return my_interval_timer() - cache->last_read > CACHE_MIN_IDLE_TIME_NS;
}
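
With last_read being an Atomic_relaxed<ulonglong>, there is no torn read to guard against, and srw_lock offers no rw_lock_own()-style owner check to assert on, so the debug assertion is simply dropped. What remains is a plain rate limit, roughly:

    #include <cstdint>

    // Sketch of the rate limit. The constant's name comes from the source;
    // the value used here (0.1 s) is an assumption for illustration.
    constexpr uint64_t CACHE_MIN_IDLE_TIME_NS = 100000000;

    bool can_update(uint64_t now_ns, uint64_t last_read_ns) {
      // refresh the cache only if it has been idle long enough
      return now_ns - last_read_ns > CACHE_MIN_IDLE_TIME_NS;
    }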
@@ -1260,15 +1256,14 @@ trx_i_s_cache_init(
trx_i_s_cache_t* cache) /*!< out: cache to init */
{
/* The latching is done in the following order:
-acquire trx_i_s_cache_t::rw_lock, X
+acquire trx_i_s_cache_t::rw_lock, rwlock
acquire lock mutex
release lock mutex
release trx_i_s_cache_t::rw_lock
-acquire trx_i_s_cache_t::rw_lock, S
+acquire trx_i_s_cache_t::rw_lock, rdlock
release trx_i_s_cache_t::rw_lock */
-rw_lock_create(trx_i_s_cache_lock_key, &cache->rw_lock,
-SYNC_TRX_I_S_RWLOCK);
+cache->rw_lock.init(trx_i_s_cache_lock_key);
cache->last_read = 0;
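
The comment above documents the two latching sequences the cache participates in. As comments, with the mutex name a placeholder (only the rw_lock calls appear in this diff):

    // Writer path: exclusive cache latch, lock mutex nested inside it.
    //   cache->rw_lock.wr_lock();
    //     mutex_enter(&lock_mutex);   // snapshot lock state into the cache
    //     mutex_exit(&lock_mutex);
    //   cache->rw_lock.wr_unlock();
    //
    // Reader path: shared cache latch only.
    //   cache->rw_lock.rd_lock();
    //     ... read the cached rows ...
    //   cache->rw_lock.rd_unlock();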
@@ -1294,7 +1289,7 @@ trx_i_s_cache_free(
/*===============*/
trx_i_s_cache_t* cache) /*!< in, own: cache to free */
{
-rw_lock_free(&cache->rw_lock);
+cache->rw_lock.destroy();
cache->locks_hash.free();
ha_storage_free(cache->storage);
@@ -1310,7 +1305,7 @@ trx_i_s_cache_start_read(
/*=====================*/
trx_i_s_cache_t* cache) /*!< in: cache */
{
-rw_lock_s_lock(&cache->rw_lock);
+cache->rw_lock.rd_lock();
}
/*******************************************************************//**
@@ -1321,7 +1316,7 @@ trx_i_s_cache_end_read(
trx_i_s_cache_t* cache) /*!< in: cache */
{
cache->last_read = my_interval_timer();
-rw_lock_s_unlock(&cache->rw_lock);
+cache->rw_lock.rd_unlock();
}
/*******************************************************************//**
@@ -1331,7 +1326,7 @@ trx_i_s_cache_start_write(
/*======================*/
trx_i_s_cache_t* cache) /*!< in: cache */
{
-rw_lock_x_lock(&cache->rw_lock);
+cache->rw_lock.wr_lock();
}
/*******************************************************************//**
@@ -1341,9 +1336,7 @@ trx_i_s_cache_end_write(
/*====================*/
trx_i_s_cache_t* cache) /*!< in: cache */
{
-ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_X));
-rw_lock_x_unlock(&cache->rw_lock);
+cache->rw_lock.wr_unlock();
}
/*******************************************************************//**
@@ -1356,9 +1349,6 @@ cache_select_table(
trx_i_s_cache_t* cache, /*!< in: whole cache */
enum i_s_table table) /*!< in: which table */
{
-ut_ad(rw_lock_own_flagged(&cache->rw_lock,
-RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
switch (table) {
case I_S_INNODB_TRX:
return &cache->innodb_trx;
......
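
Putting the visible pieces together, a reader of the INFORMATION_SCHEMA tables would bracket its access like this (sketched from functions in this diff; the caller side is not part of the commit):

    // Reader sketch, using only functions shown above:
    //   trx_i_s_cache_start_read(cache);      // cache->rw_lock.rd_lock()
    //   i_s_table_cache_t* t = cache_select_table(cache, I_S_INNODB_TRX);
    //   ... copy rows out of t ...
    //   trx_i_s_cache_end_read(cache);        // stamps last_read, rd_unlock()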