Commit 62687801 authored by Sergey Vojtovich

tc_active_instances: my_atomic to std::atomic

parent 3b3f9315
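
The change replaces MariaDB's my_atomic wrappers (my_atomic_cas32_weak_explicit, my_atomic_load32_explicit) on tc_active_instances with C++ std::atomic operations, keeping relaxed memory ordering throughout. The standalone sketch below illustrates the resulting idiom on a stand-in counter; the names active_instances, try_activate_one_more and current_instances are invented for illustration and do not appear in the server code.

  #include <atomic>
  #include <cstdint>

  // Stand-in for tc_active_instances (illustration only).
  static std::atomic<uint32_t> active_instances(1);

  // Try to activate one more instance, as lock_and_check_contention does.
  // On failure compare_exchange_weak stores the current value back into
  // 'expected', so the caller always ends up with a fresh snapshot (the
  // behaviour the old my_atomic CAS provided through its pointer argument).
  static bool try_activate_one_more(uint32_t expected)
  {
    return active_instances.compare_exchange_weak(expected, expected + 1,
                                                  std::memory_order_relaxed,
                                                  std::memory_order_relaxed);
  }

  // Relaxed read, matching the tc_add_table / tc_acquire_table hunks.
  static uint32_t current_instances()
  {
    return active_instances.load(std::memory_order_relaxed);
  }
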
@@ -7831,7 +7831,7 @@ SHOW_VAR status_vars[]= {
   {"Subquery_cache_miss", (char*) &subquery_cache_miss, SHOW_LONG},
   {"Table_locks_immediate", (char*) &locks_immediate, SHOW_LONG},
   {"Table_locks_waited", (char*) &locks_waited, SHOW_LONG},
-  {"Table_open_cache_active_instances", (char*) &tc_active_instances, SHOW_UINT},
+  {"Table_open_cache_active_instances", (char*) &show_tc_active_instances, SHOW_SIMPLE_FUNC},
   {"Table_open_cache_hits", (char*) offsetof(STATUS_VAR, table_open_cache_hits), SHOW_LONGLONG_STATUS},
   {"Table_open_cache_misses", (char*) offsetof(STATUS_VAR, table_open_cache_misses), SHOW_LONGLONG_STATUS},
   {"Table_open_cache_overflows", (char*) offsetof(STATUS_VAR, table_open_cache_overflows), SHOW_LONGLONG_STATUS},
@@ -56,7 +56,7 @@
 ulong tdc_size; /**< Table definition cache threshold for LRU eviction. */
 ulong tc_size; /**< Table cache threshold for LRU eviction. */
 uint32 tc_instances;
-uint32 tc_active_instances= 1;
+static std::atomic<uint32_t> tc_active_instances(1);
 static std::atomic<bool> tc_contention_warning_reported;
 /** Data collections. */
@@ -163,7 +163,7 @@ struct Table_cache_instance
     overhead on TABLE object release. All other table cache mutex acquistions
     are considered out of hot path and are not instrumented either.
   */
-  void lock_and_check_contention(uint32 n_instances, uint32 instance)
+  void lock_and_check_contention(uint32_t n_instances, uint32_t instance)
   {
     if (mysql_mutex_trylock(&LOCK_table_cache))
     {
@@ -172,11 +172,10 @@
       {
         if (n_instances < tc_instances)
         {
-          if (my_atomic_cas32_weak_explicit((int32*) &tc_active_instances,
-                                            (int32*) &n_instances,
-                                            (int32) n_instances + 1,
-                                            MY_MEMORY_ORDER_RELAXED,
-                                            MY_MEMORY_ORDER_RELAXED))
+          if (tc_active_instances.
+              compare_exchange_weak(n_instances, n_instances + 1,
+                                    std::memory_order_relaxed,
+                                    std::memory_order_relaxed))
           {
             sql_print_information("Detected table cache mutex contention at instance %d: "
                                   "%d%% waits. Additional table cache instance "
@@ -354,8 +353,8 @@ void tc_purge(bool mark_flushed)
 void tc_add_table(THD *thd, TABLE *table)
 {
-  uint32 i= thd->thread_id % my_atomic_load32_explicit((int32*) &tc_active_instances,
-                                                       MY_MEMORY_ORDER_RELAXED);
+  uint32_t i=
+    thd->thread_id % tc_active_instances.load(std::memory_order_relaxed);
   TABLE *LRU_table= 0;
   TDC_element *element= table->s->tdc;
@@ -408,10 +407,8 @@ void tc_add_table(THD *thd, TABLE *table)
 TABLE *tc_acquire_table(THD *thd, TDC_element *element)
 {
-  uint32 n_instances=
-    my_atomic_load32_explicit((int32*) &tc_active_instances,
-                              MY_MEMORY_ORDER_RELAXED);
-  uint32 i= thd->thread_id % n_instances;
+  uint32_t n_instances= tc_active_instances.load(std::memory_order_relaxed);
+  uint32_t i= thd->thread_id % n_instances;
   TABLE *table;
   tc[i].lock_and_check_contention(n_instances, i);
@@ -1342,3 +1339,14 @@ int tdc_iterate(THD *thd, my_hash_walk_action action, void *argument,
   }
   return res;
 }
+
+
+int show_tc_active_instances(THD *thd, SHOW_VAR *var, char *buff,
+                             enum enum_var_type scope)
+{
+  var->type= SHOW_UINT;
+  var->value= buff;
+  *(reinterpret_cast<uint32_t*>(buff))=
+    tc_active_instances.load(std::memory_order_relaxed);
+  return 0;
+}
@@ -71,7 +71,6 @@ enum enum_tdc_remove_table_type
 extern ulong tdc_size;
 extern ulong tc_size;
 extern uint32 tc_instances;
-extern uint32 tc_active_instances;
 extern bool tdc_init(void);
 extern void tdc_start_shutdown(void);
@@ -98,6 +97,8 @@ extern int tdc_iterate(THD *thd, my_hash_walk_action action, void *argument,
                        bool no_dups= false);
 extern uint tc_records(void);
+int show_tc_active_instances(THD *thd, SHOW_VAR *var, char *buff,
+                             enum enum_var_type scope);
 extern void tc_purge(bool mark_flushed= false);
 extern void tc_add_table(THD *thd, TABLE *table);
 extern void tc_release_table(TABLE *table);
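
Note on the status-variable hunks: because tc_active_instances is now a file-local std::atomic rather than a plain extern uint32, the status_vars[] entry can no longer read it through a raw SHOW_UINT pointer; the commit therefore registers a SHOW_SIMPLE_FUNC callback (show_tc_active_instances) that loads the value with relaxed ordering and copies it into the buffer supplied by SHOW STATUS. Below is a minimal sketch of that callback pattern using simplified stand-in types, not the server's SHOW_VAR machinery.

  #include <atomic>
  #include <cstdint>

  // Simplified stand-ins; the real SHOW_VAR/SHOW_SIMPLE_FUNC types differ.
  namespace cache_unit {                      // plays the role of table_cache.cc
    static std::atomic<uint32_t> active(1);   // not visible to other files

    uint32_t read_active()                    // analogue of show_tc_active_instances
    { return active.load(std::memory_order_relaxed); }
  }

  struct status_entry                         // plays the role of a status_vars[] row
  {
    const char *name;
    uint32_t (*read)();                       // callback instead of a raw pointer
  };

  static const status_entry entry=
  { "Table_open_cache_active_instances", cache_unit::read_active };
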