Commit 55db59f1 authored by Meng-Hsiu Chiang, committed by Marko Mäkelä

[MDEV-28162] Replace PFS_atomic with std::atomic<T>

The PFS_atomic class contains wrappers around the my_atomic_* operations, which
are macros that expand to the GNU atomic built-ins (__atomic_*). Because
compilers implement these built-ins differently, clang can fail to compile the
wrappers on the x86_32 architecture.

The following functions are replaced with the C++ std::atomic type in the
performance schema code base:
  - PFS_atomic::store_*()
      -> my_atomic_store*
        -> __atomic_store_n()
    => std::atomic<T>::store()

  - PFS_atomic::load_*()
      -> my_atomic_load*
        -> __atomic_load_n()
    => std::atomic<T>::load()

  - PFS_atomic::add_*()
      -> my_atomic_add*
        -> __atomic_fetch_add()
    => std::atomic<T>::fetch_add()

  - PFS_atomic::cas_*()
      -> my_atomic_cas*
        -> __atomic_compare_exchange_n()
    => std::atomic<T>::compare_exchange_strong()

With these replacements in place, the PFS_atomic class can be dropped
completely (a short usage sketch follows below).
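
For illustration only (this sketch is not part of the commit), the refcount
helpers found in PFS_account, PFS_host, PFS_user and PFS_table_share map onto
std::atomic as follows; the struct name refcount_sketch is hypothetical, while
the member functions mirror the diff below:

  #include <atomic>

  struct refcount_sketch
  {
    std::atomic<int> m_refcount;  /* was: int m_refcount */

    void init_refcount(void)
    { m_refcount.store(1); }      /* was PFS_atomic::store_32(& m_refcount, 1) */
    int get_refcount(void)
    { return m_refcount.load(); } /* was PFS_atomic::load_32(& m_refcount) */
    void inc_refcount(void)
    { m_refcount.fetch_add(1); }  /* was PFS_atomic::add_32(& m_refcount, 1) */
    void dec_refcount(void)
    { m_refcount.fetch_sub(1); }  /* was PFS_atomic::add_32(& m_refcount, -1) */
  };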

Note that in the wrappers the memory order passed to the original GNU atomic
built-ins is hard-coded as `__ATOMIC_SEQ_CST`, which is equivalent to
`std::memory_order_seq_cst` in C++ and is the default argument of the
std::atomic<T> member functions, so the behaviour is unchanged.
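
For example (illustrative only, not taken from the patch), the defaulted and
the explicit call below perform the same sequentially consistent operation;
counter and bump are hypothetical names, and raw_counter in the final comment
stands for a plain uint32 only to show the old built-in call:

  #include <atomic>
  #include <cstdint>

  static std::atomic<uint32_t> counter(0);

  void bump(void)
  {
    counter.fetch_add(1);                            /* defaulted memory order */
    counter.fetch_add(1, std::memory_order_seq_cst); /* identical, spelled out */
    /* the old wrapper issued the equivalent GNU built-in:
       __atomic_fetch_add(&raw_counter, 1, __ATOMIC_SEQ_CST); */
  }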

All new code of the whole pull request, including one or several files
that are either new files or modified ones, is contributed under the
BSD-new license. I am contributing on behalf of my employer, Amazon Web
Services.
parent d5bad490
......@@ -49,7 +49,6 @@ cursor_by_thread.h
cursor_by_user.h
pfs.h
pfs_account.h
pfs_atomic.h
pfs_buffer_container.h
pfs_builtin_memory.h
pfs_column_types.h
......
......@@ -27,6 +27,7 @@
@file storage/perfschema/pfs_account.h
Performance schema account (declarations).
*/
#include <atomic>
#include "pfs_lock.h"
#include "lf.h"
......@@ -62,22 +63,22 @@ struct PFS_ALIGNED PFS_account : PFS_connection_slice
public:
inline void init_refcount(void)
{
PFS_atomic::store_32(& m_refcount, 1);
m_refcount.store(1);
}
inline int get_refcount(void)
{
return PFS_atomic::load_32(& m_refcount);
return m_refcount.load();
}
inline void inc_refcount(void)
{
PFS_atomic::add_32(& m_refcount, 1);
m_refcount.fetch_add(1);
}
inline void dec_refcount(void)
{
PFS_atomic::add_32(& m_refcount, -1);
m_refcount.fetch_sub(1);
}
void aggregate(bool alive, PFS_user *safe_user, PFS_host *safe_host);
......@@ -109,7 +110,7 @@ struct PFS_ALIGNED PFS_account : PFS_connection_slice
ulonglong m_disconnected_count;
private:
int m_refcount;
std::atomic<int> m_refcount;
};
int init_account(const PFS_global_param *param);
......
/* Copyright (c) 2009, 2023, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is also distributed with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have included with MySQL.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
#ifndef PFS_ATOMIC_H
#define PFS_ATOMIC_H
/**
@file storage/perfschema/pfs_atomic.h
Atomic operations (declarations).
*/
#include <my_atomic.h>
/** Helper for atomic operations. */
class PFS_atomic
{
public:
/** Atomic load. */
static inline int32 load_32(int32 *ptr)
{
return my_atomic_load32(ptr);
}
/** Atomic load. */
static inline int64 load_64(int64 *ptr)
{
return my_atomic_load64(ptr);
}
/** Atomic load. */
static inline uint32 load_u32(uint32 *ptr)
{
return (uint32) my_atomic_load32((int32*) ptr);
}
/** Atomic load. */
static inline uint64 load_u64(uint64 *ptr)
{
return (uint64) my_atomic_load64((int64*) ptr);
}
/** Atomic store. */
static inline void store_32(int32 *ptr, int32 value)
{
my_atomic_store32(ptr, value);
}
/** Atomic store. */
static inline void store_64(int64 *ptr, int64 value)
{
my_atomic_store64(ptr, value);
}
/** Atomic store. */
static inline void store_u32(uint32 *ptr, uint32 value)
{
my_atomic_store32((int32*) ptr, (int32) value);
}
/** Atomic store. */
static inline void store_u64(uint64 *ptr, uint64 value)
{
my_atomic_store64((int64*) ptr, (int64) value);
}
/** Atomic add. */
static inline int32 add_32(int32 *ptr, int32 value)
{
return my_atomic_add32(ptr, value);
}
/** Atomic add. */
static inline int64 add_64(int64 *ptr, int64 value)
{
return my_atomic_add64(ptr, value);
}
/** Atomic add. */
static inline uint32 add_u32(uint32 *ptr, uint32 value)
{
return (uint32) my_atomic_add32((int32*) ptr, (int32) value);
}
/** Atomic add. */
static inline uint64 add_u64(uint64 *ptr, uint64 value)
{
return (uint64) my_atomic_add64((int64*) ptr, (int64) value);
}
/** Atomic compare and swap. */
static inline bool cas_32(int32 *ptr, int32 *old_value,
int32 new_value)
{
return my_atomic_cas32(ptr, old_value, new_value);
}
/** Atomic compare and swap. */
static inline bool cas_64(int64 *ptr, int64 *old_value,
int64 new_value)
{
return my_atomic_cas64(ptr, old_value, new_value);
}
/** Atomic compare and swap. */
static inline bool cas_u32(uint32 *ptr, uint32 *old_value,
uint32 new_value)
{
return my_atomic_cas32((int32*) ptr, (int32*) old_value,
(uint32) new_value);
}
/** Atomic compare and swap. */
static inline bool cas_u64(uint64 *ptr, uint64 *old_value,
uint64 new_value)
{
return my_atomic_cas64((int64*) ptr, (int64*) old_value,
(uint64) new_value);
}
};
#endif
......@@ -87,7 +87,7 @@ class PFS_buffer_default_array
if (m_full)
return NULL;
monotonic= PFS_atomic::add_u32(& m_monotonic.m_u32, 1);
monotonic= m_monotonic.m_u32.fetch_add(1);
monotonic_max= monotonic + static_cast<uint>(m_max);
while (monotonic < monotonic_max)
......@@ -99,7 +99,8 @@ class PFS_buffer_default_array
{
return pfs;
}
monotonic= PFS_atomic::add_u32(& m_monotonic.m_u32, 1);
monotonic= m_monotonic.m_u32.fetch_add(1);
}
m_full= true;
......@@ -517,7 +518,7 @@ class PFS_buffer_scalable_container
ulong get_row_count()
{
ulong page_count= PFS_atomic::load_u32(& m_max_page_index.m_u32);
ulong page_count= m_max_page_index.m_u32.load();
return page_count * PFS_PAGE_SIZE;
}
......@@ -554,11 +555,11 @@ class PFS_buffer_scalable_container
/*
1: Try to find an available record within the existing pages
*/
current_page_count= PFS_atomic::load_u32(& m_max_page_index.m_u32);
current_page_count= m_max_page_index.m_u32.load();
if (current_page_count != 0)
{
monotonic= PFS_atomic::load_u32(& m_monotonic.m_u32);
monotonic= m_monotonic.m_u32.load();
monotonic_max= monotonic + current_page_count;
while (monotonic < monotonic_max)
......@@ -602,7 +603,7 @@ class PFS_buffer_scalable_container
counter faster and then move on to the detection of new pages,
in part 2: below.
*/
monotonic= PFS_atomic::add_u32(& m_monotonic.m_u32, 1);
monotonic= m_monotonic.m_u32.fetch_add(1);
};
}
......@@ -683,7 +684,7 @@ class PFS_buffer_scalable_container
my_atomic_storeptr(typed_addr, ptr);
/* Advertise the new page */
PFS_atomic::add_u32(& m_max_page_index.m_u32, 1);
m_max_page_index.m_u32.fetch_add(1);
}
pthread_mutex_unlock(& m_critical_section);
......
......@@ -75,7 +75,7 @@ int init_digest(const PFS_global_param *param)
*/
digest_max= param->m_digest_sizing;
digest_lost= 0;
PFS_atomic::store_u32(& digest_monotonic_index.m_u32, 1);
digest_monotonic_index.m_u32.store(1);
digest_full= false;
if (digest_max == 0)
......@@ -274,7 +274,7 @@ find_or_create_digest(PFS_thread *thread,
while (++attempts <= digest_max)
{
safe_index= PFS_atomic::add_u32(& digest_monotonic_index.m_u32, 1) % digest_max;
safe_index= digest_monotonic_index.m_u32.fetch_add(1) % digest_max;
if (safe_index == 0)
{
/* Record [0] is reserved. */
......@@ -406,7 +406,7 @@ void reset_esms_by_digest()
Reset index which indicates where the next calculated digest information
to be inserted in statements_digest_stat_array.
*/
PFS_atomic::store_u32(& digest_monotonic_index.m_u32, 1);
digest_monotonic_index.m_u32.store(1);
digest_full= false;
}
......@@ -34,7 +34,6 @@
#include "pfs_host.h"
#include "pfs_user.h"
#include "pfs_events_stages.h"
#include "pfs_atomic.h"
#include "pfs_buffer_container.h"
#include "pfs_builtin_memory.h"
#include "m_string.h"
......@@ -62,7 +61,7 @@ int init_events_stages_history_long(uint events_stages_history_long_sizing)
{
events_stages_history_long_size= events_stages_history_long_sizing;
events_stages_history_long_full= false;
PFS_atomic::store_u32(&events_stages_history_long_index.m_u32, 0);
events_stages_history_long_index.m_u32.store(0);
if (events_stages_history_long_size == 0)
return 0;
......@@ -135,7 +134,7 @@ void insert_events_stages_history_long(PFS_events_stages *stage)
assert(events_stages_history_long_array != NULL);
uint index= PFS_atomic::add_u32(&events_stages_history_long_index.m_u32, 1);
uint index= events_stages_history_long_index.m_u32.fetch_add(1);
index= index % events_stages_history_long_size;
if (index == 0)
......@@ -176,7 +175,7 @@ void reset_events_stages_history(void)
/** Reset table EVENTS_STAGES_HISTORY_LONG data. */
void reset_events_stages_history_long(void)
{
PFS_atomic::store_u32(&events_stages_history_long_index.m_u32, 0);
events_stages_history_long_index.m_u32.store(0);
events_stages_history_long_full= false;
PFS_events_stages *pfs= events_stages_history_long_array;
......
......@@ -34,7 +34,6 @@
#include "pfs_host.h"
#include "pfs_user.h"
#include "pfs_events_statements.h"
#include "pfs_atomic.h"
#include "pfs_buffer_container.h"
#include "pfs_builtin_memory.h"
#include "m_string.h"
......@@ -64,7 +63,7 @@ int init_events_statements_history_long(size_t events_statements_history_long_si
{
events_statements_history_long_size= events_statements_history_long_sizing;
events_statements_history_long_full= false;
PFS_atomic::store_u32(&events_statements_history_long_index.m_u32, 0);
events_statements_history_long_index.m_u32.store(0);
if (events_statements_history_long_size == 0)
return 0;
......@@ -213,7 +212,7 @@ void insert_events_statements_history_long(PFS_events_statements *statement)
assert(events_statements_history_long_array != NULL);
uint index= PFS_atomic::add_u32(&events_statements_history_long_index.m_u32, 1);
uint index= events_statements_history_long_index.m_u32.fetch_add(1);
index= index % events_statements_history_long_size;
if (index == 0)
......@@ -258,7 +257,7 @@ void reset_events_statements_history(void)
/** Reset table EVENTS_STATEMENTS_HISTORY_LONG data. */
void reset_events_statements_history_long(void)
{
PFS_atomic::store_u32(&events_statements_history_long_index.m_u32, 0);
events_statements_history_long_index.m_u32.store(0);
events_statements_history_long_full= false;
PFS_events_statements *pfs= events_statements_history_long_array;
......
......@@ -34,7 +34,6 @@
#include "pfs_host.h"
#include "pfs_user.h"
#include "pfs_events_transactions.h"
#include "pfs_atomic.h"
#include "pfs_buffer_container.h"
#include "pfs_builtin_memory.h"
#include "m_string.h"
......@@ -62,7 +61,7 @@ int init_events_transactions_history_long(uint events_transactions_history_long_
{
events_transactions_history_long_size= events_transactions_history_long_sizing;
events_transactions_history_long_full= false;
PFS_atomic::store_u32(&events_transactions_history_long_index.m_u32, 0);
events_transactions_history_long_index.m_u32.store(0);
if (events_transactions_history_long_size == 0)
return 0;
......@@ -135,7 +134,7 @@ void insert_events_transactions_history_long(PFS_events_transactions *transactio
assert(events_transactions_history_long_array != NULL);
uint index= PFS_atomic::add_u32(&events_transactions_history_long_index.m_u32, 1);
uint index= events_transactions_history_long_index.m_u32.fetch_add(1);
index= index % events_transactions_history_long_size;
if (index == 0)
......@@ -176,7 +175,7 @@ void reset_events_transactions_history(void)
/** Reset table EVENTS_TRANSACTIONS_HISTORY_LONG data. */
void reset_events_transactions_history_long(void)
{
PFS_atomic::store_u32(&events_transactions_history_long_index.m_u32, 0);
events_transactions_history_long_index.m_u32.store(0);
events_transactions_history_long_full= false;
PFS_events_transactions *pfs= events_transactions_history_long_array;
......
......@@ -34,7 +34,6 @@
#include "pfs_host.h"
#include "pfs_account.h"
#include "pfs_events_waits.h"
#include "pfs_atomic.h"
#include "pfs_buffer_container.h"
#include "pfs_builtin_memory.h"
#include "m_string.h"
......@@ -66,7 +65,7 @@ int init_events_waits_history_long(uint events_waits_history_long_sizing)
{
events_waits_history_long_size= events_waits_history_long_sizing;
events_waits_history_long_full= false;
PFS_atomic::store_u32(&events_waits_history_long_index.m_u32, 0);
events_waits_history_long_index.m_u32.store(0);
if (events_waits_history_long_size == 0)
return 0;
......@@ -135,7 +134,7 @@ void insert_events_waits_history_long(PFS_events_waits *wait)
if (unlikely(events_waits_history_long_size == 0))
return;
uint index= PFS_atomic::add_u32(&events_waits_history_long_index.m_u32, 1);
uint index= events_waits_history_long_index.m_u32.fetch_add(1);
index= index % events_waits_history_long_size;
if (index == 0)
......@@ -181,7 +180,7 @@ void reset_events_waits_history(void)
/** Reset table EVENTS_WAITS_HISTORY_LONG data. */
void reset_events_waits_history_long(void)
{
PFS_atomic::store_u32(&events_waits_history_long_index.m_u32, 0);
events_waits_history_long_index.m_u32.store(0);
events_waits_history_long_full= false;
PFS_events_waits *wait= events_waits_history_long_array;
......
......@@ -23,6 +23,8 @@
#ifndef PFS_GLOBAL_H
#define PFS_GLOBAL_H
#include <atomic>
#include "my_compiler.h"
/**
......@@ -59,7 +61,7 @@ extern size_t pfs_allocated_memory;
*/
struct PFS_cacheline_uint32
{
uint32 m_u32;
std::atomic<uint32> m_u32;
char m_full_cache_line[PFS_CACHE_LINE_SIZE - sizeof(uint32)];
PFS_cacheline_uint32()
......@@ -73,7 +75,7 @@ struct PFS_cacheline_uint32
*/
struct PFS_cacheline_uint64
{
uint64 m_u64;
std::atomic<uint64> m_u64;
char m_full_cache_line[PFS_CACHE_LINE_SIZE - sizeof(uint64)];
PFS_cacheline_uint64()
......
......@@ -28,6 +28,8 @@
Performance schema host (declarations).
*/
#include <atomic>
#include "pfs_lock.h"
#include "lf.h"
#include "pfs_con_slice.h"
......@@ -58,22 +60,22 @@ struct PFS_ALIGNED PFS_host : PFS_connection_slice
public:
inline void init_refcount(void)
{
PFS_atomic::store_32(& m_refcount, 1);
m_refcount.store(1);
}
inline int get_refcount(void)
{
return PFS_atomic::load_32(& m_refcount);
return m_refcount.load();
}
inline void inc_refcount(void)
{
PFS_atomic::add_32(& m_refcount, 1);
m_refcount.fetch_add(1);
}
inline void dec_refcount(void)
{
PFS_atomic::add_32(& m_refcount, -1);
m_refcount.fetch_sub(1);
}
void aggregate(bool alive);
......@@ -97,7 +99,7 @@ struct PFS_ALIGNED PFS_host : PFS_connection_slice
ulonglong m_disconnected_count;
private:
int m_refcount;
std::atomic<int> m_refcount;
};
int init_host(const PFS_global_param *param);
......
......@@ -526,7 +526,7 @@ PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
if (pfs != NULL)
{
pfs->m_thread_internal_id=
PFS_atomic::add_u64(&thread_internal_id_counter.m_u64, 1);
thread_internal_id_counter.m_u64.fetch_add(1);
pfs->m_parent_thread_internal_id= 0;
pfs->m_processlist_id= static_cast<ulong>(processlist_id);
pfs->m_thread_os_id= my_thread_os_id();
......
......@@ -25,6 +25,7 @@
@file storage/perfschema/pfs_instr_class.cc
Performance schema instruments meta data (implementation).
*/
#include <atomic>
#include "my_global.h"
#include "my_sys.h"
......@@ -36,7 +37,6 @@
#include "pfs_timer.h"
#include "pfs_events_waits.h"
#include "pfs_setup_object.h"
#include "pfs_atomic.h"
#include "pfs_program.h"
#include "pfs_buffer_container.h"
#include "mysql/psi/mysql_thread.h"
......@@ -76,12 +76,12 @@ static void init_instr_class(PFS_instr_class *klass,
- the performance schema initialization
- a plugin initialization
*/
static uint32 mutex_class_dirty_count= 0;
static uint32 mutex_class_allocated_count= 0;
static uint32 rwlock_class_dirty_count= 0;
static uint32 rwlock_class_allocated_count= 0;
static uint32 cond_class_dirty_count= 0;
static uint32 cond_class_allocated_count= 0;
static std::atomic<uint32> mutex_class_dirty_count(0);
static std::atomic<uint32> mutex_class_allocated_count(0);
static std::atomic<uint32> rwlock_class_dirty_count(0);
static std::atomic<uint32> rwlock_class_allocated_count(0);
static std::atomic<uint32> cond_class_dirty_count(0);
static std::atomic<uint32> cond_class_allocated_count(0);
/** Size of the mutex class array. @sa mutex_class_array */
ulong mutex_class_max= 0;
......@@ -137,8 +137,8 @@ PFS_cond_class *cond_class_array= NULL;
- the performance schema initialization
- a plugin initialization
*/
static uint32 thread_class_dirty_count= 0;
static uint32 thread_class_allocated_count= 0;
static std::atomic<uint32> thread_class_dirty_count(0);
static std::atomic<uint32> thread_class_allocated_count(0);
static PFS_thread_class *thread_class_array= NULL;
......@@ -185,28 +185,28 @@ LF_HASH table_share_hash;
/** True if table_share_hash is initialized. */
static bool table_share_hash_inited= false;
static uint32 file_class_dirty_count= 0;
static uint32 file_class_allocated_count= 0;
static std::atomic<uint32> file_class_dirty_count(0);
static std::atomic<uint32> file_class_allocated_count(0);
PFS_file_class *file_class_array= NULL;
static uint32 stage_class_dirty_count= 0;
static uint32 stage_class_allocated_count= 0;
static std::atomic<uint32> stage_class_dirty_count(0);
static std::atomic<uint32> stage_class_allocated_count(0);
static PFS_stage_class *stage_class_array= NULL;
static uint32 statement_class_dirty_count= 0;
static uint32 statement_class_allocated_count= 0;
static std::atomic<uint32> statement_class_dirty_count(0);
static std::atomic<uint32> statement_class_allocated_count(0);
static PFS_statement_class *statement_class_array= NULL;
static uint32 socket_class_dirty_count= 0;
static uint32 socket_class_allocated_count= 0;
static std::atomic<uint32> socket_class_dirty_count(0);
static std::atomic<uint32> socket_class_allocated_count(0);
static PFS_socket_class *socket_class_array= NULL;
static uint32 memory_class_dirty_count= 0;
static uint32 memory_class_allocated_count= 0;
static std::atomic<uint32> memory_class_dirty_count(0);
static std::atomic<uint32> memory_class_allocated_count(0);
static PFS_memory_class *memory_class_array= NULL;
......@@ -1092,7 +1092,7 @@ PFS_sync_key register_mutex_class(const char *name, uint name_length,
mutex_class_dirty_count is incremented *before* an entry is added
mutex_class_allocated_count is incremented *after* an entry is added
*/
index= PFS_atomic::add_u32(&mutex_class_dirty_count, 1);
index= mutex_class_dirty_count.fetch_add(1);
if (index < mutex_class_max)
{
......@@ -1148,7 +1148,7 @@ PFS_sync_key register_mutex_class(const char *name, uint name_length,
empty/NULL/zero, but this won't cause a crash
(mutex_class_array is initialized with MY_ZEROFILL).
*/
PFS_atomic::add_u32(&mutex_class_allocated_count, 1);
mutex_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1178,7 +1178,7 @@ PFS_sync_key register_rwlock_class(const char *name, uint name_length,
REGISTER_CLASS_BODY_PART(index, rwlock_class_array, rwlock_class_max,
name, name_length)
index= PFS_atomic::add_u32(&rwlock_class_dirty_count, 1);
index= rwlock_class_dirty_count.fetch_add(1);
if (index < rwlock_class_max)
{
......@@ -1191,7 +1191,7 @@ PFS_sync_key register_rwlock_class(const char *name, uint name_length,
entry->m_timed= false;
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
PFS_atomic::add_u32(&rwlock_class_allocated_count, 1);
rwlock_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1217,7 +1217,7 @@ PFS_sync_key register_cond_class(const char *name, uint name_length,
REGISTER_CLASS_BODY_PART(index, cond_class_array, cond_class_max,
name, name_length)
index= PFS_atomic::add_u32(&cond_class_dirty_count, 1);
index= cond_class_dirty_count.fetch_add(1);
if (index < cond_class_max)
{
......@@ -1229,7 +1229,7 @@ PFS_sync_key register_cond_class(const char *name, uint name_length,
entry->m_timed= false;
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
PFS_atomic::add_u32(&cond_class_allocated_count, 1);
cond_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1311,7 +1311,7 @@ PFS_thread_key register_thread_class(const char *name, uint name_length,
return (index + 1);
}
index= PFS_atomic::add_u32(&thread_class_dirty_count, 1);
index= thread_class_dirty_count.fetch_add(1);
if (index < thread_class_max)
{
......@@ -1320,7 +1320,7 @@ PFS_thread_key register_thread_class(const char *name, uint name_length,
strncpy(entry->m_name, name, name_length);
entry->m_name_length= name_length;
entry->m_enabled= true;
PFS_atomic::add_u32(&thread_class_allocated_count, 1);
thread_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1361,7 +1361,7 @@ PFS_file_key register_file_class(const char *name, uint name_length,
REGISTER_CLASS_BODY_PART(index, file_class_array, file_class_max,
name, name_length)
index= PFS_atomic::add_u32(&file_class_dirty_count, 1);
index= file_class_dirty_count.fetch_add(1);
if (index < file_class_max)
{
......@@ -1373,7 +1373,7 @@ PFS_file_key register_file_class(const char *name, uint name_length,
entry->m_timed= true;
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
PFS_atomic::add_u32(&file_class_allocated_count, 1);
file_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1403,7 +1403,7 @@ PFS_stage_key register_stage_class(const char *name,
REGISTER_CLASS_BODY_PART(index, stage_class_array, stage_class_max,
name, name_length)
index= PFS_atomic::add_u32(&stage_class_dirty_count, 1);
index= stage_class_dirty_count.fetch_add(1);
if (index < stage_class_max)
{
......@@ -1427,7 +1427,7 @@ PFS_stage_key register_stage_class(const char *name,
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
PFS_atomic::add_u32(&stage_class_allocated_count, 1);
stage_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1454,7 +1454,7 @@ PFS_statement_key register_statement_class(const char *name, uint name_length,
REGISTER_CLASS_BODY_PART(index, statement_class_array, statement_class_max,
name, name_length)
index= PFS_atomic::add_u32(&statement_class_dirty_count, 1);
index= statement_class_dirty_count.fetch_add(1);
if (index < statement_class_max)
{
......@@ -1465,7 +1465,7 @@ PFS_statement_key register_statement_class(const char *name, uint name_length,
entry->m_timed= true;
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
PFS_atomic::add_u32(&statement_class_allocated_count, 1);
statement_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1537,7 +1537,7 @@ PFS_socket_key register_socket_class(const char *name, uint name_length,
REGISTER_CLASS_BODY_PART(index, socket_class_array, socket_class_max,
name, name_length)
index= PFS_atomic::add_u32(&socket_class_dirty_count, 1);
index= socket_class_dirty_count.fetch_add(1);
if (index < socket_class_max)
{
......@@ -1549,7 +1549,7 @@ PFS_socket_key register_socket_class(const char *name, uint name_length,
entry->m_timed= false;
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
PFS_atomic::add_u32(&socket_class_allocated_count, 1);
socket_class_allocated_count.fetch_add(1);
return (index + 1);
}
......@@ -1590,7 +1590,7 @@ PFS_memory_key register_memory_class(const char *name, uint name_length,
REGISTER_CLASS_BODY_PART(index, memory_class_array, memory_class_max,
name, name_length)
index= PFS_atomic::add_u32(&memory_class_dirty_count, 1);
index= memory_class_dirty_count.fetch_add(1);
if (index < memory_class_max)
{
......@@ -1601,7 +1601,7 @@ PFS_memory_key register_memory_class(const char *name, uint name_length,
/* Set user-defined configuration options for this instrument */
configure_instr_class(entry);
entry->m_timed= false; /* Immutable */
PFS_atomic::add_u32(&memory_class_allocated_count, 1);
memory_class_allocated_count.fetch_add(1);
return (index + 1);
}
......
......@@ -23,11 +23,12 @@
#ifndef PFS_INSTR_CLASS_H
#define PFS_INSTR_CLASS_H
#include <atomic>
#include "my_global.h"
#include "mysql_com.h" /* NAME_LEN */
#include "lf.h"
#include "pfs_global.h"
#include "pfs_atomic.h"
#include "sql_array.h"
/**
......@@ -329,22 +330,22 @@ struct PFS_ALIGNED PFS_table_share
inline void init_refcount(void)
{
PFS_atomic::store_32(& m_refcount, 1);
m_refcount.store(1);
}
inline int get_refcount(void)
{
return PFS_atomic::load_32(& m_refcount);
return m_refcount.load();
}
inline void inc_refcount(void)
{
PFS_atomic::add_32(& m_refcount, 1);
m_refcount.fetch_add(1);
}
inline void dec_refcount(void)
{
PFS_atomic::add_32(& m_refcount, -1);
m_refcount.fetch_sub(1);
}
void refresh_setup_object_flags(PFS_thread *thread);
......@@ -387,7 +388,7 @@ struct PFS_ALIGNED PFS_table_share
private:
/** Number of opened table handles. */
int m_refcount;
std::atomic<int> m_refcount;
/** Table locks statistics. */
PFS_table_share_lock *m_race_lock_stat;
/** Table indexes' stats. */
......
......@@ -28,9 +28,9 @@
Performance schema internal locks (declarations).
*/
#include "my_global.h"
#include <atomic>
#include "pfs_atomic.h"
#include "my_global.h"
/* to cause bugs, testing */
// #define MEM(X) std::memory_order_relaxed
......@@ -103,7 +103,7 @@ struct pfs_lock
The version number is stored in the high 30 bits.
The state is stored in the low 2 bits.
*/
uint32 m_version_state;
std::atomic<uint32> m_version_state;
uint32 copy_version_state()
{
......@@ -119,7 +119,7 @@ struct pfs_lock
{
uint32 copy;
copy= PFS_atomic::load_u32(&m_version_state);
copy= m_version_state.load();
return ((copy & STATE_MASK) == PFS_LOCK_FREE);
}
......@@ -129,7 +129,7 @@ struct pfs_lock
{
uint32 copy;
copy= PFS_atomic::load_u32(&m_version_state);
copy= m_version_state.load();
return ((copy & STATE_MASK) == PFS_LOCK_ALLOCATED);
}
......@@ -144,7 +144,7 @@ struct pfs_lock
{
uint32 old_val;
old_val= PFS_atomic::load_u32(&m_version_state);
old_val= m_version_state.load();
if ((old_val & STATE_MASK) != PFS_LOCK_FREE)
{
......@@ -154,7 +154,7 @@ struct pfs_lock
uint32 new_val= (old_val & VERSION_MASK) + PFS_LOCK_DIRTY;
bool pass;
pass= PFS_atomic::cas_u32(&m_version_state, &old_val, new_val);
pass= m_version_state.compare_exchange_strong(old_val, new_val);
if (pass)
{
......@@ -178,7 +178,7 @@ struct pfs_lock
uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_DIRTY;
/* We own the record, no need to use compare and swap. */
PFS_atomic::store_u32(&m_version_state, new_val);
m_version_state.store(new_val);
copy_ptr->m_version_state= new_val;
}
......@@ -195,7 +195,7 @@ struct pfs_lock
/* Increment the version, set the ALLOCATED state */
uint32 new_val= (copy->m_version_state & VERSION_MASK) + VERSION_INC + PFS_LOCK_ALLOCATED;
PFS_atomic::store_u32(&m_version_state, new_val);
m_version_state.store(new_val);
}
/**
......@@ -210,7 +210,7 @@ struct pfs_lock
/* Increment the version, set the ALLOCATED state */
uint32 new_val= (copy & VERSION_MASK) + VERSION_INC + PFS_LOCK_ALLOCATED;
PFS_atomic::store_u32(&m_version_state, new_val);
m_version_state.store(new_val);
}
/**
......@@ -219,10 +219,10 @@ struct pfs_lock
void set_dirty(pfs_dirty_state *copy_ptr)
{
/* Do not set the version to 0, read the previous value. */
uint32 copy= PFS_atomic::load_u32(&m_version_state);
uint32 copy= m_version_state.load();
/* Increment the version, set the DIRTY state */
uint32 new_val= (copy & VERSION_MASK) + VERSION_INC + PFS_LOCK_DIRTY;
PFS_atomic::store_u32(&m_version_state, new_val);
m_version_state.store(new_val);
copy_ptr->m_version_state= new_val;
}
......@@ -238,7 +238,7 @@ struct pfs_lock
/* Keep the same version, set the FREE state */
uint32 new_val= (copy->m_version_state & VERSION_MASK) + PFS_LOCK_FREE;
PFS_atomic::store_u32(&m_version_state, new_val);
m_version_state.store(new_val);
}
/**
......@@ -258,7 +258,7 @@ struct pfs_lock
/* Keep the same version, set the FREE state */
uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_FREE;
PFS_atomic::store_u32(&m_version_state, new_val);
m_version_state.store(new_val);
}
/**
......@@ -268,7 +268,7 @@ struct pfs_lock
*/
void begin_optimistic_lock(struct pfs_optimistic_state *copy)
{
copy->m_version_state= PFS_atomic::load_u32(&m_version_state);
copy->m_version_state= m_version_state.load();
}
/**
......@@ -285,7 +285,7 @@ struct pfs_lock
if ((copy->m_version_state & STATE_MASK) != PFS_LOCK_ALLOCATED)
return false;
version_state= PFS_atomic::load_u32(&m_version_state);
version_state= m_version_state.load();
/* Check the version + state has not changed. */
if (copy->m_version_state != version_state)
......@@ -298,7 +298,7 @@ struct pfs_lock
{
uint32 version_state;
version_state= PFS_atomic::load_u32(&m_version_state);
version_state= m_version_state.load();
return (version_state & VERSION_MASK);
}
......
......@@ -33,7 +33,6 @@
#include "pfs_account.h"
#include "pfs_host.h"
#include "pfs_user.h"
#include "pfs_atomic.h"
#include "pfs_buffer_container.h"
#include "m_string.h"
......
......@@ -34,7 +34,6 @@
#include "pfs_host.h"
#include "pfs_user.h"
#include "pfs_status.h"
#include "pfs_atomic.h"
#include "pfs_buffer_container.h"
#include "sql_show.h" /* reset_status_vars */
......
......@@ -28,6 +28,8 @@
Performance schema user (declarations).
*/
#include <atomic>
#include "pfs_lock.h"
#include "lf.h"
#include "pfs_con_slice.h"
......@@ -58,22 +60,22 @@ struct PFS_ALIGNED PFS_user : public PFS_connection_slice
public:
inline void init_refcount(void)
{
PFS_atomic::store_32(& m_refcount, 1);
m_refcount.store(1);
}
inline int get_refcount(void)
{
return PFS_atomic::load_32(& m_refcount);
return m_refcount.load();
}
inline void inc_refcount(void)
{
PFS_atomic::add_32(& m_refcount, 1);
m_refcount.fetch_add(1);
}
inline void dec_refcount(void)
{
PFS_atomic::add_32(& m_refcount, -1);
m_refcount.fetch_sub(1);
}
void aggregate(bool alive);
......@@ -97,7 +99,7 @@ struct PFS_ALIGNED PFS_user : public PFS_connection_slice
ulonglong m_disconnected_count;
private:
int m_refcount;
std::atomic<int> m_refcount;
};
int init_user(const PFS_global_param *param);
......