Commit 0ce7f6b0 authored by Sergey Vojtovich

MDEV-17441 - InnoDB transition to C++11 atomics

purge_sys_t::m_enabled transition to std::atomic.

enabled_latched() doesn't make much sense: in this particular case it is no faster than an atomic load. The sole caller has to reload its value anyway, because rw_lock_x_lock(&purge_sys.latch) issues an acquire memory barrier.

When purge_sys_t::close() is reached, m_enabled must be false; otherwise
we may free members that are still in use by the coordinator thread.
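A minimal standalone sketch of the pattern this change relies on (not the InnoDB code itself: purge_demo_t, the std::mutex stand-in for rw_lock_t, and main() are illustrative only). Callers that only need a cheap check use a relaxed atomic load; callers that need an exact answer re-check after taking the latch, whose acquisition already provides the acquire ordering that enabled_latched() duplicated.

// Hypothetical illustration; names and the mutex are simplified stand-ins.
#include <atomic>
#include <cassert>
#include <mutex>

class purge_demo_t
{
  std::mutex latch;                    // stands in for rw_lock_t latch
  std::atomic<bool> m_enabled{false};
public:
  /** cheap check; relaxed is enough because callers that need an exact
  answer re-check after acquiring the latch */
  bool enabled() { return m_enabled.load(std::memory_order_relaxed); }

  void coordinator_startup()
  {
    assert(!enabled());
    m_enabled.store(true, std::memory_order_relaxed);
  }

  void coordinator_shutdown()
  {
    assert(enabled());
    m_enabled.store(false, std::memory_order_relaxed);
  }

  void stop()
  {
    std::lock_guard<std::mutex> g(latch); // locking is an acquire operation
    if (!enabled())                       // re-check under the latch; no separate latched variant needed
      return;
    // ... pause the purge coordinator ...
  }
};

int main()
{
  purge_demo_t p;
  p.coordinator_startup();
  p.stop();
  p.coordinator_shutdown();
}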
parent f401ba47
@@ -147,8 +147,8 @@ class purge_sys_t
   MY_ALIGNED(CACHE_LINE_SIZE)
   rw_lock_t latch;
 private:
-  /** whether purge is enabled; protected by latch and my_atomic */
-  int32_t m_enabled;
+  /** whether purge is enabled; protected by latch and std::atomic */
+  std::atomic<bool> m_enabled;
   /** number of pending stop() calls without resume() */
   Atomic_counter<int32_t> m_paused;
 public:
@@ -242,16 +242,7 @@ class purge_sys_t
   void close();
   /** @return whether purge is enabled */
-  bool enabled()
-  {
-    return my_atomic_load32_explicit(&m_enabled, MY_MEMORY_ORDER_RELAXED);
-  }
-  /** @return whether purge is enabled */
-  bool enabled_latched()
-  {
-    ut_ad(rw_lock_own_flagged(&latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
-    return bool(m_enabled);
-  }
+  bool enabled() { return m_enabled.load(std::memory_order_relaxed); }
   /** @return whether the purge coordinator is paused */
   bool paused()
   { return m_paused != 0; }
@@ -261,14 +252,14 @@ class purge_sys_t
   void coordinator_startup()
   {
     ut_ad(!enabled());
-    my_atomic_store32_explicit(&m_enabled, true, MY_MEMORY_ORDER_RELAXED);
+    m_enabled.store(true, std::memory_order_relaxed);
   }
   /** Disable purge at shutdown */
   void coordinator_shutdown()
   {
     ut_ad(enabled());
-    my_atomic_store32_explicit(&m_enabled, false, MY_MEMORY_ORDER_RELAXED);
+    m_enabled.store(false, std::memory_order_relaxed);
   }
   /** @return whether the purge coordinator thread is active */

@@ -184,7 +184,7 @@ void purge_sys_t::close()
   ut_ad(this == &purge_sys);
   if (!event) return;
-  m_enabled= false;
+  ut_ad(!enabled());
   trx_t* trx = query->trx;
   que_graph_free(query);
   ut_ad(!trx->id);
@@ -1351,7 +1351,7 @@ void purge_sys_t::stop()
 {
   rw_lock_x_lock(&latch);
-  if (!enabled_latched())
+  if (!enabled())
   {
     /* Shutdown must have been initiated during FLUSH TABLES FOR EXPORT. */
     ut_ad(!srv_undo_sources);