Commit 079d0a87 authored by Marko Mäkelä, committed by GitHub

Merge pull request #876 from tempesta-tech/tt-10.1-MDEV-17313-counter-race

MDEV-17313 Data race in ib_counter_t
parents 1655053a 15803fce
@@ -32,6 +32,7 @@ Created 2012/04/12 by Sunny Bains
#include <string.h>
#include "os0thread.h"
#include "os0sync.h"
+#include "my_atomic.h"

/** Default number of slots to use in ib_counter_t */
#define IB_N_SLOTS 64
@@ -81,8 +82,8 @@ struct thread_id_indexer_t : public generic_indexer_t<Type, N> {
	}
};

-/** Class for using fuzzy counters. The counter is not protected by any
-mutex and the results are not guaranteed to be 100% accurate but close
+/** Class for using fuzzy counters. The counter is relaxed atomic
+so the results are not guaranteed to be 100% accurate but close
enough. Creates an array of counters and separates each element by the
CACHE_LINE_SIZE bytes */
template <
@@ -91,20 +92,6 @@ template <
	template<typename, int> class Indexer = thread_id_indexer_t>
struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
{
-#ifdef UNIV_DEBUG
-	~ib_counter_t()
-	{
-		size_t	n = (CACHE_LINE_SIZE / sizeof(Type));
-
-		/* Check that we aren't writing outside our defined bounds. */
-		for (size_t i = 0; i < UT_ARR_SIZE(m_counter); i += n) {
-			for (size_t j = 1; j < n - 1; ++j) {
-				ut_ad(m_counter[i + j] == 0);
-			}
-		}
-	}
-#endif /* UNIV_DEBUG */
-
	/** Increment the counter by 1. */
	void inc() UNIV_NOTHROW { add(1); }
@@ -124,15 +111,36 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
		ut_ad(i < UT_ARR_SIZE(m_counter));

-		m_counter[i] += n;
+		if (sizeof(Type) == 8) {
+			my_atomic_add64_explicit(
+				reinterpret_cast<int64*>(&m_counter[i]),
+				static_cast<int64>(n), MY_MEMORY_ORDER_RELAXED);
+		} else if (sizeof(Type) == 4) {
+			my_atomic_add32_explicit(
+				reinterpret_cast<int32*>(&m_counter[i]),
+				static_cast<int32>(n), MY_MEMORY_ORDER_RELAXED);
+		}
+		compile_time_assert(sizeof(Type) == 8 || sizeof(Type) == 4);
	}

-	/* @return total value - not 100% accurate, since it is not atomic. */
+	/* @return total value - not 100% accurate, since it is relaxed atomic. */
	operator Type() const UNIV_NOTHROW {
		Type	total = 0;

		for (size_t i = 0; i < N; ++i) {
-			total += m_counter[m_policy.offset(i)];
+			if (sizeof(Type) == 8) {
+				total += static_cast<
+					Type>(my_atomic_load64_explicit(
+					reinterpret_cast<int64*>(const_cast<Type*>(
+					&m_counter[m_policy.offset(i)])),
+					MY_MEMORY_ORDER_RELAXED));
+			} else if (sizeof(Type) == 4) {
+				total += static_cast<
+					Type>(my_atomic_load32_explicit(
+					reinterpret_cast<int32*>(const_cast<Type*>(
+					&m_counter[m_policy.offset(i)])),
+					MY_MEMORY_ORDER_RELAXED));
+			}
		}

		return(total);
......
@@ -32,6 +32,7 @@ Created 2012/04/12 by Sunny Bains
#include <string.h>
#include "os0thread.h"
#include "os0sync.h"
+#include "my_atomic.h"

/** Default number of slots to use in ib_counter_t */
#define IB_N_SLOTS 64
@@ -81,8 +82,8 @@ struct thread_id_indexer_t : public generic_indexer_t<Type, N> {
	}
};

-/** Class for using fuzzy counters. The counter is not protected by any
-mutex and the results are not guaranteed to be 100% accurate but close
+/** Class for using fuzzy counters. The counter is relaxed atomic
+so the results are not guaranteed to be 100% accurate but close
enough. Creates an array of counters and separates each element by the
CACHE_LINE_SIZE bytes */
template <
@@ -91,20 +92,6 @@ template <
	template<typename, int> class Indexer = thread_id_indexer_t>
struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
{
-#ifdef UNIV_DEBUG
-	~ib_counter_t()
-	{
-		size_t	n = (CACHE_LINE_SIZE / sizeof(Type));
-
-		/* Check that we aren't writing outside our defined bounds. */
-		for (size_t i = 0; i < UT_ARR_SIZE(m_counter); i += n) {
-			for (size_t j = 1; j < n - 1; ++j) {
-				ut_ad(m_counter[i + j] == 0);
-			}
-		}
-	}
-#endif /* UNIV_DEBUG */
-
	/** Increment the counter by 1. */
	void inc() UNIV_NOTHROW { add(1); }
@@ -124,15 +111,36 @@ struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
		ut_ad(i < UT_ARR_SIZE(m_counter));

-		m_counter[i] += n;
+		if (sizeof(Type) == 8) {
+			my_atomic_add64_explicit(
+				reinterpret_cast<int64*>(&m_counter[i]),
+				static_cast<int64>(n), MY_MEMORY_ORDER_RELAXED);
+		} else if (sizeof(Type) == 4) {
+			my_atomic_add32_explicit(
+				reinterpret_cast<int32*>(&m_counter[i]),
+				static_cast<int32>(n), MY_MEMORY_ORDER_RELAXED);
+		}
+		compile_time_assert(sizeof(Type) == 8 || sizeof(Type) == 4);
	}

-	/* @return total value - not 100% accurate, since it is not atomic. */
+	/* @return total value - not 100% accurate, since it is relaxed atomic. */
	operator Type() const UNIV_NOTHROW {
		Type	total = 0;

		for (size_t i = 0; i < N; ++i) {
-			total += m_counter[m_policy.offset(i)];
+			if (sizeof(Type) == 8) {
+				total += static_cast<
+					Type>(my_atomic_load64_explicit(
+					reinterpret_cast<int64*>(const_cast<Type*>(
+					&m_counter[m_policy.offset(i)])),
+					MY_MEMORY_ORDER_RELAXED));
+			} else if (sizeof(Type) == 4) {
+				total += static_cast<
+					Type>(my_atomic_load32_explicit(
+					reinterpret_cast<int32*>(const_cast<Type*>(
+					&m_counter[m_policy.offset(i)])),
+					MY_MEMORY_ORDER_RELAXED));
+			}
		}

		return(total);
......
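The same scheme can be expressed with plain C++11 atomics, which may make the intent of the patch easier to see: every slot becomes a relaxed atomic, increments use a relaxed fetch-add, and the read side sums relaxed loads, so the concurrent plain accesses that MDEV-17313 flagged as a data race become atomic while the reported total stays only approximate. This is a minimal standalone sketch, not code from the patch; the names relaxed_counter_t, slot_t, SLOT_COUNT and CACHE_LINE are illustrative assumptions.

```cpp
#include <atomic>
#include <cstddef>
#include <functional>
#include <thread>

static const std::size_t CACHE_LINE = 64;   // assumed cache-line size
static const std::size_t SLOT_COUNT = 64;   // mirrors IB_N_SLOTS

template <typename Type>
struct relaxed_counter_t {
	/* Each slot lives on its own cache line to avoid false sharing,
	mirroring the MY_ALIGNED(CACHE_LINE_SIZE) layout of ib_counter_t. */
	struct alignas(CACHE_LINE) slot_t {
		std::atomic<Type> value{0};
	};

	/* Pick a slot from the calling thread's id, in the spirit of
	thread_id_indexer_t. */
	static std::size_t index()
	{
		return std::hash<std::thread::id>()(std::this_thread::get_id())
			% SLOT_COUNT;
	}

	/* Relaxed atomic add: no data race, but also no ordering guarantees,
	which is all a statistics counter needs. */
	void add(Type n)
	{
		m_slots[index()].value.fetch_add(n, std::memory_order_relaxed);
	}

	/* Relaxed loads of all slots; the sum can miss in-flight increments,
	just like operator Type() above. */
	operator Type() const
	{
		Type total = 0;
		for (std::size_t i = 0; i < SLOT_COUNT; i++) {
			total += m_slots[i].value.load(std::memory_order_relaxed);
		}
		return total;
	}

	slot_t m_slots[SLOT_COUNT];
};
```

Relaxed ordering is sufficient here because the counter is never used for synchronization: each increment only needs to be applied atomically to its own slot, and a reader that observes a slightly stale sum is acceptable by design.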