Commit 97055e6b authored by Marko Mäkelä

MDEV-14154: Remove ut_time_us()

Use microsecond_interval_timer()
or my_interval_timer() [in nanoseconds] instead.
parent 058c385e
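
The conversion pattern applied throughout the diff below is the same at every call site: take a start reading from the monotonic timer, take an end reading, and divide the nanosecond difference by 1000 wherever a microsecond counter is kept (or call microsecond_interval_timer() directly where a microsecond value is wanted). A minimal, self-contained sketch of that pattern follows; it is illustrative only, with std::chrono::steady_clock standing in for my_interval_timer() and interval_timer_ns() being a made-up helper name.

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    /* Stand-in for my_interval_timer(): nanoseconds from a monotonic clock. */
    static uint64_t interval_timer_ns()
    {
        using namespace std::chrono;
        return uint64_t(duration_cast<nanoseconds>(
            steady_clock::now().time_since_epoch()).count());
    }

    int main()
    {
        const uint64_t start_ns = interval_timer_ns();
        /* ... the operation being timed would run here ... */
        const uint64_t elapsed_us = (interval_timer_ns() - start_ns) / 1000;
        std::cout << "elapsed: " << elapsed_us << " microseconds\n";
    }
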
@@ -639,7 +639,7 @@ buf_buddy_relocate(
if (buf_page_can_relocate(bpage)) {
/* Relocate the compressed page. */
- uintmax_t usec = ut_time_us(NULL);
+ const ulonglong ns = my_interval_timer();
ut_a(bpage->zip.data == src);
@@ -655,7 +655,7 @@ buf_buddy_relocate(
buf_buddy_stat_t* buddy_stat = &buf_pool->buddy_stat[i];
buddy_stat->relocated++;
- buddy_stat->relocated_usec += ut_time_us(NULL) - usec;
+ buddy_stat->relocated_usec+= (my_interval_timer() - ns) / 1000;
return(true);
}
......
@@ -1625,19 +1625,18 @@ fil_crypt_get_page_throttle_func(
state->crypt_stat.pages_read_from_disk++;
- uintmax_t start = ut_time_us(NULL);
+ const ulonglong start = my_interval_timer();
block = buf_page_get_gen(page_id, page_size,
RW_X_LATCH,
NULL, BUF_GET_POSSIBLY_FREED,
file, line, mtr, &err);
- uintmax_t end = ut_time_us(NULL);
- if (end < start) {
- end = start; // safety...
- }
+ const ulonglong end = my_interval_timer();
state->cnt_waited++;
- state->sum_waited_us += (end - start);
+ if (end > start) {
+ state->sum_waited_us += (end - start) / 1000;
+ }
/* average page load */
ulint add_sleeptime_ms = 0;
@@ -1961,7 +1960,7 @@ fil_crypt_flush_space(
bool success = false;
ulint n_pages = 0;
ulint sum_pages = 0;
- uintmax_t start = ut_time_us(NULL);
+ const ulonglong start = my_interval_timer();
do {
success = buf_flush_lists(ULINT_MAX, end_lsn, &n_pages);
@@ -1969,11 +1968,11 @@ fil_crypt_flush_space(
sum_pages += n_pages;
} while (!success && !space->is_stopping());
- uintmax_t end = ut_time_us(NULL);
+ const ulonglong end = my_interval_timer();
if (sum_pages && end > start) {
state->cnt_waited += sum_pages;
- state->sum_waited_us += (end - start);
+ state->sum_waited_us += (end - start) / 1000;
/* statistics */
state->crypt_stat.pages_flushed += sum_pages;
......
@@ -1767,12 +1767,13 @@ innobase_srv_conc_enter_innodb(
} else if (trx->mysql_thd != NULL
&& thd_is_replication_slave_thread(trx->mysql_thd)) {
- UT_WAIT_FOR(
- srv_conc_get_active_threads()
- < srv_thread_concurrency,
- srv_replication_delay * 1000);
+ const ulonglong end = my_interval_timer()
+ + ulonglong(srv_replication_delay) * 1000000;
+ while (srv_conc_get_active_threads()
+ >= srv_thread_concurrency
+ || my_interval_timer() >= end) {
+ os_thread_sleep(2000 /* 2 ms */);
+ }
} else {
srv_conc_enter_innodb(prebuilt);
}
......
@@ -2,7 +2,7 @@
Copyright (c) 2010, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
- Copyright (c) 2013, 2017, MariaDB Corporation.
+ Copyright (c) 2013, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -718,8 +718,8 @@ monitor counter
#define MONITOR_INC_TIME_IN_MICRO_SECS(monitor, value) \
MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
- uintmax_t old_time = (value); \
- value = ut_time_us(NULL); \
+ uintmax_t old_time = value; \
+ value = microsecond_interval_timer(); \
MONITOR_VALUE(monitor) += (mon_type_t) (value - old_time);\
}
......
@@ -99,22 +99,6 @@ typedef time_t ib_time_t;
# define UT_RESUME_PRIORITY_CPU() ((void)0)
#endif
- /*********************************************************************//**
- Delays execution for at most max_wait_us microseconds or returns earlier
- if cond becomes true.
- @param cond in: condition to wait for; evaluated every 2 ms
- @param max_wait_us in: maximum delay to wait, in microseconds */
- # define UT_WAIT_FOR(cond, max_wait_us) \
- do { \
- uintmax_t start_us; \
- start_us = ut_time_us(NULL); \
- while (!(cond) \
- && ut_time_us(NULL) - start_us < (max_wait_us)) {\
- \
- os_thread_sleep(2000 /* 2 ms */); \
- } \
- } while (0)
#define ut_max std::max
#define ut_min std::min
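
(Aside, for readers of the hunk above: the removed UT_WAIT_FOR macro amounted to a bounded polling wait, re-checking a condition every 2 ms until it became true or the maximum wait expired; the innobase_srv_conc_enter_innodb hunk earlier open-codes its replacement. A standalone sketch of that idea, reusing the illustrative stand-in timer from the sketch near the top and an equally illustrative wait_for() helper, might look like this.)

    #include <chrono>
    #include <cstdint>
    #include <thread>

    /* Stand-in monotonic nanosecond timer, as in the earlier sketch. */
    static uint64_t interval_timer_ns()
    {
        using namespace std::chrono;
        return uint64_t(duration_cast<nanoseconds>(
            steady_clock::now().time_since_epoch()).count());
    }

    /* Poll cond() every 2 ms until it returns true or max_wait_us expires. */
    template <typename Cond>
    static void wait_for(Cond cond, uint64_t max_wait_us)
    {
        const uint64_t deadline = interval_timer_ns() + max_wait_us * 1000;
        while (!cond() && interval_timer_ns() < deadline) {
            std::this_thread::sleep_for(std::chrono::milliseconds(2));
        }
    }

    int main()
    {
        bool flag = false;                       /* illustrative condition */
        wait_for([&] { return flag; }, 100000);  /* wait at most 0.1 s */
    }
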
@@ -237,15 +221,6 @@ ut_usectime(
ulint* ms); /*!< out: microseconds since the Epoch+*sec */
- /**********************************************************//**
- Returns the number of microseconds since epoch. Similar to
- time(3), the return value is also stored in *tloc, provided
- that tloc is non-NULL.
- @return us since epoch */
- uintmax_t
- ut_time_us(
- /*=======*/
- uintmax_t* tloc); /*!< out: us since epoch, if non-NULL */
/**********************************************************//**
Returns the number of milliseconds since some epoch. The
value may wrap around. It should only be used for heuristic
purposes.
......
@@ -41,7 +41,7 @@ number between 0 and 2^64-1 inclusive. The formula and the constants
being used are:
X[n+1] = (a * X[n] + c) mod m
where:
- X[0] = ut_time_us(NULL)
+ X[0] = my_interval_timer()
a = 1103515245 (3^5 * 5 * 7 * 129749)
c = 12345 (3 * 5 * 823)
m = 18446744073709551616 (2^64)
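
(Aside: one step of the 64-bit linear congruential generator described above is plain unsigned arithmetic, since the "mod 2^64" comes for free from 64-bit unsigned overflow, a point the patched code below also makes. The sketch is illustrative; lcg_next() is a made-up name and the seed is an arbitrary nonzero constant, whereas the patched function seeds from my_interval_timer().)

    #include <cstdint>
    #include <cstdio>

    /* X[n+1] = (a * X[n] + c) mod 2^64, with the constants quoted above. */
    static uint64_t lcg_next(uint64_t x)
    {
        return 1103515245ULL * x + 12345ULL;  /* overflow supplies the mod */
    }

    int main()
    {
        uint64_t x = 123456789;               /* arbitrary nonzero seed */
        for (int i = 0; i < 3; i++) {
            x = lcg_next(x);
            std::printf("%llu\n", (unsigned long long) x);
        }
    }
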
@@ -54,12 +54,10 @@ page_cur_lcg_prng(void)
{
#define LCG_a 1103515245
#define LCG_c 12345
- static ib_uint64_t lcg_current = 0;
- static ibool initialized = FALSE;
+ static uint64_t lcg_current;
- if (!initialized) {
- lcg_current = (ib_uint64_t) ut_time_us(NULL);
- initialized = TRUE;
+ if (!lcg_current) {
+ lcg_current = my_interval_timer();
}
/* no need to "% 2^64" explicitly because lcg_current is
......
@@ -1277,7 +1277,7 @@ page_zip_compress(
byte* storage; /* storage of uncompressed
columns */
index_id_t ind_id;
- uintmax_t usec = ut_time_us(NULL);
+ const ulonglong ns = my_interval_timer();
#ifdef PAGE_ZIP_COMPRESS_DBG
FILE* logfile = NULL;
#endif
@@ -1564,7 +1564,7 @@ page_zip_compress(
dict_index_zip_failure(index);
}
- uintmax_t time_diff = ut_time_us(NULL) - usec;
+ const uint64_t time_diff = (my_interval_timer() - ns) / 1000;
page_zip_stat[page_zip->ssize - 1].compressed_usec
+= time_diff;
if (cmp_per_index_enabled) {
@@ -1630,7 +1630,7 @@ page_zip_compress(
fclose(logfile);
}
#endif /* PAGE_ZIP_COMPRESS_DBG */
- uintmax_t time_diff = ut_time_us(NULL) - usec;
+ const uint64_t time_diff = (my_interval_timer() - ns) / 1000;
page_zip_stat[page_zip->ssize - 1].compressed_ok++;
page_zip_stat[page_zip->ssize - 1].compressed_usec += time_diff;
if (cmp_per_index_enabled) {
@@ -3250,13 +3250,13 @@ page_zip_decompress(
page header fields that should not change
after page creation */
{
- uintmax_t usec = ut_time_us(NULL);
+ const ulonglong ns = my_interval_timer();
if (!page_zip_decompress_low(page_zip, page, all)) {
return(FALSE);
}
- uintmax_t time_diff = ut_time_us(NULL) - usec;
+ const uint64_t time_diff = (my_interval_timer() - ns) / 1000;
page_zip_stat[page_zip->ssize - 1].decompressed++;
page_zip_stat[page_zip->ssize - 1].decompressed_usec += time_diff;
......
@@ -2194,7 +2194,7 @@ srv_master_do_active_tasks(void)
/*============================*/
{
ib_time_t cur_time = ut_time();
- uintmax_t counter_time = ut_time_us(NULL);
+ ulonglong counter_time = microsecond_interval_timer();
/* First do the tasks that we are suppose to do at each
invocation of this function. */
@@ -2224,7 +2224,7 @@ srv_master_do_active_tasks(void)
/* Do an ibuf merge */
srv_main_thread_op_info = "doing insert buffer merge";
- counter_time = ut_time_us(NULL);
+ counter_time = microsecond_interval_timer();
ibuf_merge_in_background(false);
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
@@ -2289,8 +2289,6 @@ void
srv_master_do_idle_tasks(void)
/*==========================*/
{
- uintmax_t counter_time;
++srv_main_idle_loops;
MONITOR_INC(MONITOR_MASTER_IDLE_LOOPS);
@@ -2299,7 +2297,7 @@ srv_master_do_idle_tasks(void)
/* ALTER TABLE in MySQL requires on Unix that the table handler
can drop tables lazily after there no longer are SELECT
queries to them. */
- counter_time = ut_time_us(NULL);
+ ulonglong counter_time = microsecond_interval_timer();
srv_main_thread_op_info = "doing background drop tables";
row_drop_tables_for_mysql_in_background();
MONITOR_INC_TIME_IN_MICRO_SECS(
@@ -2318,7 +2316,7 @@ srv_master_do_idle_tasks(void)
log_free_check();
/* Do an ibuf merge */
- counter_time = ut_time_us(NULL);
+ counter_time = microsecond_interval_timer();
srv_main_thread_op_info = "doing insert buffer merge";
ibuf_merge_in_background(true);
MONITOR_INC_TIME_IN_MICRO_SECS(
......
@@ -140,9 +140,8 @@ struct i_s_table_cache_t {
struct trx_i_s_cache_t {
rw_lock_t rw_lock; /*!< read-write lock protecting
the rest of this structure */
- uintmax_t last_read; /*!< last time the cache was read;
- measured in microseconds since
- epoch */
+ ulonglong last_read; /*!< last time the cache was read;
+ measured in nanoseconds */
ib_mutex_t last_read_mutex;/*!< mutex protecting the
last_read member - it is updated
inside a shared lock of the
@@ -1183,22 +1182,16 @@ add_trx_relevant_locks_to_cache(
}
/** The minimum time that a cache must not be updated after it has been
- read for the last time; measured in microseconds. We use this technique
+ read for the last time; measured in nanoseconds. We use this technique
to ensure that SELECTs which join several INFORMATION SCHEMA tables read
the same version of the cache. */
- #define CACHE_MIN_IDLE_TIME_US 100000 /* 0.1 sec */
+ #define CACHE_MIN_IDLE_TIME_NS 100000000 /* 0.1 sec */
/*******************************************************************//**
Checks if the cache can safely be updated.
- @return TRUE if can be updated */
- static
- ibool
- can_cache_be_updated(
- /*=================*/
- trx_i_s_cache_t* cache) /*!< in: cache */
+ @return whether the cache can be updated */
+ static bool can_cache_be_updated(trx_i_s_cache_t* cache)
{
- uintmax_t now;
/* Here we read cache->last_read without acquiring its mutex
because last_read is only updated when a shared rw lock on the
whole cache is being held (see trx_i_s_cache_end_read()) and
@@ -1208,13 +1201,7 @@ can_cache_be_updated(
ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_X));
- now = ut_time_us(NULL);
- if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {
- return(TRUE);
- }
- return(FALSE);
+ return my_interval_timer() - cache->last_read > CACHE_MIN_IDLE_TIME_NS;
}
/*******************************************************************//**
@@ -1358,8 +1345,7 @@ trx_i_s_possibly_fetch_data_into_cache(
lock_mutex_exit();
/* update cache last read time */
- time_t now = ut_time_us(NULL);
- cache->last_read = now;
+ cache->last_read = my_interval_timer();
return(0);
}
@@ -1449,12 +1435,10 @@ trx_i_s_cache_end_read(
/*===================*/
trx_i_s_cache_t* cache) /*!< in: cache */
{
- uintmax_t now;
ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_S));
/* update cache last read time */
- now = ut_time_us(NULL);
+ const ulonglong now = my_interval_timer();
mutex_enter(&cache->last_read_mutex);
cache->last_read = now;
mutex_exit(&cache->last_read_mutex);
......
@@ -144,25 +144,6 @@ ut_usectime(
return(ret);
}
- /**********************************************************//**
- Returns the number of microseconds since epoch. Similar to
- time(3), the return value is also stored in *tloc, provided
- that tloc is non-NULL.
- @return us since epoch */
- uintmax_t
- ut_time_us(
- /*=======*/
- uintmax_t* tloc) /*!< out: us since epoch, if non-NULL */
- {
- uintmax_t us = my_interval_timer() / 1000;
- if (tloc != NULL) {
- *tloc = us;
- }
- return(us);
- }
/**********************************************************//**
Returns the number of milliseconds since some epoch. The
value may wrap around. It should only be used for heuristic
......