Commit fb335b48 authored by Marko Mäkelä's avatar Marko Mäkelä

Allocate purge_sys statically

There is only one purge_sys. Allocate it statically in order to avoid
dereferencing a pointer whenever accessing it. Also, align some
members to their own cache line in order to avoid false sharing.

purge_sys_t::create(): The deferred constructor.

purge_sys_t::close(): The early destructor.

undo::Truncate::create(): The deferred constructor.
Because purge_sys.undo_trunc is constructed before the start-up
parameters are parsed, the normal constructor would copy a
wrong value of srv_purge_rseg_truncate_frequency.

TrxUndoRsegsIterator: Do not forward-declare an inline constructor,
because the static construction of purge_sys.rseg_iter would not have
access to it.
parent a3a2b898
......@@ -142,7 +142,7 @@ typedef std::priority_queue<
/** Chooses the rollback segment with the oldest committed transaction */
struct TrxUndoRsegsIterator {
/** Constructor */
inline TrxUndoRsegsIterator();
TrxUndoRsegsIterator();
/** Sets the next rseg to purge in purge_sys.
Executed in the purge coordinator thread.
@return whether anything is to be purged */
......@@ -204,17 +204,12 @@ namespace undo {
/** Track UNDO tablespace mark for truncate. */
class Truncate {
public:
Truncate()
:
m_undo_for_trunc(ULINT_UNDEFINED),
m_rseg_for_trunc(),
m_scan_start(1),
m_purge_rseg_truncate_frequency(
static_cast<ulint>(
srv_purge_rseg_truncate_frequency))
void create()
{
/* Do Nothing. */
m_undo_for_trunc = ULINT_UNDEFINED;
m_scan_start = 1;
m_purge_rseg_truncate_frequency =
ulint(srv_purge_rseg_truncate_frequency);
}
/** Clear the cached rollback segment. Normally done
......@@ -401,12 +396,9 @@ namespace undo {
/** The control structure used in the purge operation */
class purge_sys_t
{
bool m_initialised;
public:
/** Construct the purge system. */
purge_sys_t();
/** Destruct the purge system. */
~purge_sys_t();
MY_ALIGNED(CACHE_LINE_SIZE)
rw_lock_t latch; /*!< The latch protecting the purge
view. A purge operation must acquire an
x-latch here for the instant at which
......@@ -414,11 +406,14 @@ class purge_sys_t
log operation can prevent this by
obtaining an s-latch here. It also
protects state and running */
MY_ALIGNED(CACHE_LINE_SIZE)
os_event_t event; /*!< State signal event;
os_event_set() and os_event_reset()
are protected by purge_sys_t::latch
X-lock */
MY_ALIGNED(CACHE_LINE_SIZE)
ulint n_stop; /*!< Counter to track number stops */
volatile bool running; /*!< true, if purge is active,
we check this without the latch too */
volatile purge_state_t state; /*!< Purge coordinator thread states,
......@@ -426,6 +421,7 @@ class purge_sys_t
without holding the latch. */
que_t* query; /*!< The query graph which will do the
parallelized purge operation */
MY_ALIGNED(CACHE_LINE_SIZE)
ReadView view; /*!< The purge will not remove undo logs
which are >= this view (purge view) */
ulint n_submitted; /*!< Count of total tasks submitted
......@@ -486,10 +482,30 @@ class purge_sys_t
undo::Truncate undo_trunc; /*!< Track UNDO tablespace marked
for truncate. */
/**
Constructor.
Some members may require late initialisation, thus we just mark object as
uninitialised. Real initialisation happens in create().
*/
purge_sys_t() : m_initialised(false) {}
bool is_initialised() const { return m_initialised; }
/** Create the instance */
void create();
/** Close the purge system on shutdown */
void close();
};
/** The global data structure coordinating a purge */
extern purge_sys_t* purge_sys;
extern purge_sys_t purge_sys;
/** Info required to purge a record */
struct trx_purge_rec_t {
......
......@@ -996,10 +996,10 @@ class trx_sys_t
bool is_initialised() { return m_initialised; }
/** Create the instance */
/** Initialise the purge subsystem. */
void create();
/** Close the transaction system on shutdown */
/** Close the purge subsystem on shutdown. */
void close();
/** @return total number of active (non-prepared) transactions */
......
......@@ -5258,14 +5258,14 @@ lock_print_info_summary(
fprintf(file,
"Purge done for trx's n:o < " TRX_ID_FMT
" undo n:o < " TRX_ID_FMT " state: ",
purge_sys->tail.trx_no(),
purge_sys->tail.undo_no);
purge_sys.tail.trx_no(),
purge_sys.tail.undo_no);
/* Note: We are reading the state without the latch. One because it
will violate the latching order and two because we are merely querying
the state of the variable for display. */
switch (purge_sys->state){
switch (purge_sys.state){
case PURGE_STATE_INIT:
/* Should never be in this state while the system is running. */
ut_error;
......@@ -5281,7 +5281,7 @@ lock_print_info_summary(
case PURGE_STATE_RUN:
fprintf(file, "running");
/* Check if it is waiting for more data to arrive. */
if (!purge_sys->running) {
if (!purge_sys.running) {
fprintf(file, " but idle");
}
break;
......
......@@ -319,7 +319,7 @@ void ReadView::close()
*/
void trx_sys_t::clone_oldest_view()
{
purge_sys->view.snapshot(0);
purge_sys.view.snapshot(0);
mutex_enter(&mutex);
/* Find oldest view. */
for (const ReadView *v= UT_LIST_GET_FIRST(m_views); v;
......@@ -331,7 +331,7 @@ void trx_sys_t::clone_oldest_view()
ut_delay(1);
if (state == READ_VIEW_STATE_OPEN)
purge_sys->view.copy(*v);
purge_sys.view.copy(*v);
}
mutex_exit(&mutex);
}
......@@ -436,11 +436,11 @@ row_vers_must_preserve_del_marked(
const table_name_t& name,
mtr_t* mtr)
{
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_S));
ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
mtr_s_lock(&purge_sys->latch, mtr);
mtr_s_lock(&purge_sys.latch, mtr);
return(!purge_sys->view.changes_visible(trx_id, name));
return(!purge_sys.view.changes_visible(trx_id, name));
}
/** build virtual column value from current cluster index record data
......@@ -866,7 +866,7 @@ row_vers_old_has_index_entry(
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_S));
ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
clust_index = dict_table_get_first_index(index->table);
......@@ -889,7 +889,7 @@ row_vers_old_has_index_entry(
/* The top of the stack of versions is locked by the
mtr holding a latch on the page containing the
clustered index record. The bottom of the stack is
locked by the fact that the purge_sys->view must
locked by the fact that the purge_sys.view must
'overtake' any read view of an active transaction.
Thus, it is safe to fetch the prefixes for
externally stored columns. */
......@@ -1121,7 +1121,7 @@ row_vers_build_for_consistent_read(
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_S));
ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
ut_ad(rec_offs_validate(rec, index, *offsets));
......@@ -1234,7 +1234,7 @@ row_vers_build_for_semi_consistent_read(
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains_page_flagged(mtr, rec, MTR_MEMO_PAGE_X_FIX
| MTR_MEMO_PAGE_S_FIX));
ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_S));
ut_ad(!rw_lock_own(&(purge_sys.latch), RW_LOCK_S));
ut_ad(rec_offs_validate(rec, index, *offsets));
......
......@@ -1923,7 +1923,7 @@ srv_get_active_thread_type(void)
srv_sys_mutex_exit();
if (ret == SRV_NONE && srv_shutdown_state != SRV_SHUTDOWN_NONE
&& purge_sys != NULL) {
&& purge_sys.is_initialised()) {
/* Check only on shutdown. */
switch (trx_purge_state()) {
case PURGE_STATE_RUN:
......@@ -1973,7 +1973,7 @@ srv_wake_purge_thread_if_not_active()
{
ut_ad(!srv_sys_mutex_own());
if (purge_sys->state == PURGE_STATE_RUN
if (purge_sys.state == PURGE_STATE_RUN
&& !my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE])
&& trx_sys.history_size()) {
......@@ -2506,7 +2506,7 @@ srv_task_execute(void)
que_run_threads(thr);
my_atomic_addlint(
&purge_sys->n_completed, 1);
&purge_sys.n_completed, 1);
}
return(thr != NULL);
......@@ -2559,17 +2559,17 @@ DECLARE_THREAD(srv_worker_thread)(
}
/* Note: we are checking the state without holding the
purge_sys->latch here. */
} while (purge_sys->state != PURGE_STATE_EXIT);
purge_sys.latch here. */
} while (purge_sys.state != PURGE_STATE_EXIT);
srv_free_slot(slot);
rw_lock_x_lock(&purge_sys->latch);
rw_lock_x_lock(&purge_sys.latch);
ut_a(!purge_sys->running);
ut_a(purge_sys->state == PURGE_STATE_EXIT);
ut_a(!purge_sys.running);
ut_a(purge_sys.state == PURGE_STATE_EXIT);
rw_lock_x_unlock(&purge_sys->latch);
rw_lock_x_unlock(&purge_sys.latch);
#ifdef UNIV_DEBUG_THREAD_CREATION
ib::info() << "Purge worker thread exiting, id "
......@@ -2648,7 +2648,7 @@ srv_do_purge(ulint* n_total_purged)
}
ulint undo_trunc_freq =
purge_sys->undo_trunc.get_rseg_truncate_frequency();
purge_sys.undo_trunc.get_rseg_truncate_frequency();
ulint rseg_truncate_frequency = ut_min(
static_cast<ulint>(srv_purge_rseg_truncate_frequency),
......@@ -2662,7 +2662,7 @@ srv_do_purge(ulint* n_total_purged)
} while (!srv_purge_should_exit(n_pages_purged)
&& n_pages_purged > 0
&& purge_sys->state == PURGE_STATE_RUN);
&& purge_sys.state == PURGE_STATE_RUN);
return(rseg_history_len);
}
......@@ -2689,11 +2689,11 @@ srv_purge_coordinator_suspend(
int64_t sig_count = srv_suspend_thread(slot);
do {
rw_lock_x_lock(&purge_sys->latch);
rw_lock_x_lock(&purge_sys.latch);
purge_sys->running = false;
purge_sys.running = false;
rw_lock_x_unlock(&purge_sys->latch);
rw_lock_x_unlock(&purge_sys.latch);
/* We don't wait right away on the the non-timed wait because
we want to signal the thread that wants to suspend purge. */
......@@ -2705,14 +2705,14 @@ srv_purge_coordinator_suspend(
sig_count = srv_suspend_thread(slot);
rw_lock_x_lock(&purge_sys->latch);
rw_lock_x_lock(&purge_sys.latch);
stop = (srv_shutdown_state == SRV_SHUTDOWN_NONE
&& purge_sys->state == PURGE_STATE_STOP);
&& purge_sys.state == PURGE_STATE_STOP);
if (!stop) {
ut_a(purge_sys->n_stop == 0);
purge_sys->running = true;
ut_a(purge_sys.n_stop == 0);
purge_sys.running = true;
if (timeout
&& rseg_history_len < 5000
......@@ -2727,13 +2727,13 @@ srv_purge_coordinator_suspend(
stop = true;
}
} else {
ut_a(purge_sys->n_stop > 0);
ut_a(purge_sys.n_stop > 0);
/* Signal that we are suspended. */
os_event_set(purge_sys->event);
os_event_set(purge_sys.event);
}
rw_lock_x_unlock(&purge_sys->latch);
rw_lock_x_unlock(&purge_sys.latch);
} while (stop && srv_undo_sources);
srv_resume_thread(slot, 0, false);
......@@ -2759,12 +2759,12 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
ut_a(trx_purge_state() == PURGE_STATE_INIT);
ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
rw_lock_x_lock(&purge_sys->latch);
rw_lock_x_lock(&purge_sys.latch);
purge_sys->running = true;
purge_sys->state = PURGE_STATE_RUN;
purge_sys.running = true;
purge_sys.state = PURGE_STATE_RUN;
rw_lock_x_unlock(&purge_sys->latch);
rw_lock_x_unlock(&purge_sys.latch);
#ifdef UNIV_PFS_THREAD
pfs_register_thread(srv_purge_thread_key);
......@@ -2785,7 +2785,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
if (srv_shutdown_state == SRV_SHUTDOWN_NONE
&& srv_undo_sources
&& (purge_sys->state == PURGE_STATE_STOP
&& (purge_sys.state == PURGE_STATE_STOP
|| n_total_purged == 0)) {
srv_purge_coordinator_suspend(slot, rseg_history_len);
......@@ -2809,20 +2809,20 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
srv_free_slot(slot);
/* Note that we are shutting down. */
rw_lock_x_lock(&purge_sys->latch);
rw_lock_x_lock(&purge_sys.latch);
purge_sys->state = PURGE_STATE_EXIT;
purge_sys.state = PURGE_STATE_EXIT;
/* If there are any pending undo-tablespace truncate then clear
it off as we plan to shutdown the purge thread. */
purge_sys->undo_trunc.clear();
purge_sys.undo_trunc.clear();
purge_sys->running = false;
purge_sys.running = false;
/* Ensure that the wait in trx_purge_stop() will terminate. */
os_event_set(purge_sys->event);
os_event_set(purge_sys.event);
rw_lock_x_unlock(&purge_sys->latch);
rw_lock_x_unlock(&purge_sys.latch);
#ifdef UNIV_DEBUG_THREAD_CREATION
ib::info() << "Purge coordinator exiting, id "
......
......@@ -2661,7 +2661,7 @@ innobase_start_or_create_for_mysql()
srv_start_state_set(SRV_START_STATE_PURGE);
} else {
purge_sys->state = PURGE_STATE_DISABLED;
purge_sys.state = PURGE_STATE_DISABLED;
}
srv_is_being_started = false;
......@@ -2871,8 +2871,7 @@ innodb_shutdown()
log_shutdown();
}
trx_sys.close();
UT_DELETE(purge_sys);
purge_sys = NULL;
purge_sys.close();
if (buf_dblwr) {
buf_dblwr_free();
}
......
This diff is collapsed.
......@@ -2204,14 +2204,14 @@ trx_undo_get_undo_rec(
{
bool missing_history;
rw_lock_s_lock(&purge_sys->latch);
rw_lock_s_lock(&purge_sys.latch);
missing_history = purge_sys->view.changes_visible(trx_id, name);
missing_history = purge_sys.view.changes_visible(trx_id, name);
if (!missing_history) {
*undo_rec = trx_undo_get_undo_rec_low(roll_ptr, is_temp, heap);
}
rw_lock_s_unlock(&purge_sys->latch);
rw_lock_s_unlock(&purge_sys.latch);
return(missing_history);
}
......@@ -2273,7 +2273,7 @@ trx_undo_prev_version_build(
bool dummy_extern;
byte* buf;
ut_ad(!rw_lock_own(&purge_sys->latch, RW_LOCK_S));
ut_ad(!rw_lock_own(&purge_sys.latch, RW_LOCK_S));
ut_ad(mtr_memo_contains_page_flagged(index_mtr, index_rec,
MTR_MEMO_PAGE_S_FIX
| MTR_MEMO_PAGE_X_FIX));
......@@ -2323,12 +2323,12 @@ trx_undo_prev_version_build(
&info_bits);
/* (a) If a clustered index record version is such that the
trx id stamp in it is bigger than purge_sys->view, then the
trx id stamp in it is bigger than purge_sys.view, then the
BLOBs in that version are known to exist (the purge has not
progressed that far);
(b) if the version is the first version such that trx id in it
is less than purge_sys->view, and it is not delete-marked,
is less than purge_sys.view, and it is not delete-marked,
then the BLOBs in that version are known to exist (the purge
cannot have purged the BLOBs referenced by that version
yet).
......@@ -2367,19 +2367,19 @@ trx_undo_prev_version_build(
the BLOB. */
/* the row_upd_changes_disowned_external(update) call could be
omitted, but the synchronization on purge_sys->latch is likely
omitted, but the synchronization on purge_sys.latch is likely
more expensive. */
if ((update->info_bits & REC_INFO_DELETED_FLAG)
&& row_upd_changes_disowned_external(update)) {
bool missing_extern;
rw_lock_s_lock(&purge_sys->latch);
rw_lock_s_lock(&purge_sys.latch);
missing_extern = purge_sys->view.changes_visible(
missing_extern = purge_sys.view.changes_visible(
trx_id, index->table->name);
rw_lock_s_unlock(&purge_sys->latch);
rw_lock_s_unlock(&purge_sys.latch);
if (missing_extern) {
/* treat as a fresh insert, not to
......
......@@ -483,7 +483,7 @@ trx_rseg_mem_restore(
/* There is no need to cover this operation by the purge
mutex because we are still bootstrapping. */
purge_sys->purge_queue.push(*rseg);
purge_sys.purge_queue.push(*rseg);
}
}
}
......
......@@ -883,7 +883,7 @@ trx_lists_init_at_db_start()
{
ut_a(srv_is_being_started);
ut_ad(!srv_was_started);
ut_ad(!purge_sys);
ut_ad(!purge_sys.is_initialised());
if (srv_operation == SRV_OPERATION_RESTORE) {
/* mariabackup --prepare only deals with
......@@ -893,12 +893,11 @@ trx_lists_init_at_db_start()
return;
}
purge_sys = UT_NEW_NOKEY(purge_sys_t());
if (srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) {
return;
}
purge_sys.create();
trx_rseg_array_init();
/* Look from the rollback segments if there exist undo logs for
......@@ -1219,7 +1218,7 @@ trx_serialise(trx_t* trx)
ut_ad(mutex_own(&rseg->mutex));
if (rseg->last_page_no == FIL_NULL) {
mutex_enter(&purge_sys->pq_mutex);
mutex_enter(&purge_sys.pq_mutex);
}
trx_sys.assign_new_trx_no(trx);
......@@ -1229,8 +1228,8 @@ trx_serialise(trx_t* trx)
already in the rollback segment. User threads only
produce events when a rollback segment is empty. */
if (rseg->last_page_no == FIL_NULL) {
purge_sys->purge_queue.push(TrxUndoRsegs(trx->no, *rseg));
mutex_exit(&purge_sys->pq_mutex);
purge_sys.purge_queue.push(TrxUndoRsegs(trx->no, *rseg));
mutex_exit(&purge_sys.pq_mutex);
}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment