Commit a4785fc4 authored by Mikael Ronstrom

Moved atomics from 6.0 codebase to prepare for using atomic increments in places where LOCK_thread_count previously was used
parent ee696e41
include/atomic/gcc_builtins.h
#ifndef ATOMIC_GCC_BUILTINS_INCLUDED
#define ATOMIC_GCC_BUILTINS_INCLUDED
/* Copyright (C) 2008 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -15,7 +18,7 @@
#define make_atomic_add_body(S) \
v= __sync_fetch_and_add(a, v);
#define make_atomic_swap_body(S) \
#define make_atomic_fas_body(S) \
v= __sync_lock_test_and_set(a, v);
#define make_atomic_cas_body(S) \
int ## S sav; \
@@ -25,9 +28,14 @@
#ifdef MY_ATOMIC_MODE_DUMMY
#define make_atomic_load_body(S) ret= *a
#define make_atomic_store_body(S) *a= v
#define MY_ATOMIC_MODE "gcc-builtins-up"
#else
#define MY_ATOMIC_MODE "gcc-builtins-smp"
#define make_atomic_load_body(S) \
ret= __sync_fetch_and_or(a, 0);
#define make_atomic_store_body(S) \
(void) __sync_lock_test_and_set(a, v);
#endif
#endif /* ATOMIC_GCC_BUILTINS_INCLUDED */
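/*
  Illustration only, not part of this commit: a standalone program showing
  the contracts of the three __sync builtins used above. fetch_and_add and
  lock_test_and_set both return the previous value; val_compare_and_swap
  stores the new value only if the current value matches the comparand.
*/
#include <stdio.h>

int main(void)
{
  int a= 10, old, expected;

  old= __sync_fetch_and_add(&a, 5);         /* a becomes 15, old == 10 */
  printf("fetch_and_add: old=%d a=%d\n", old, a);

  old= __sync_lock_test_and_set(&a, 42);    /* a becomes 42, old == 15 */
  printf("test_and_set:  old=%d a=%d\n", old, a);

  expected= 42;                             /* a becomes 7 only if a == 42 */
  old= __sync_val_compare_and_swap(&a, expected, 7);
  printf("cas: success=%d a=%d\n", old == expected, a);
  return 0;
}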
include/atomic/generic-msvc.h
/* Copyright (C) 2006-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifndef _atomic_h_cleanup_
#define _atomic_h_cleanup_ "atomic/generic-msvc.h"
/*
We don't implement anything specific for MY_ATOMIC_MODE_DUMMY, always use
intrinsics.
8- and 16-bit atomics are not implemented, but they can be added if necessary.
*/
#undef MY_ATOMIC_HAS_8_16
/*
x86 compilers (both VS2003 and VS2005) never use intrinsics, but generate
function calls to kernel32 instead, even in the optimized build.
We force intrinsics as described in MSDN documentation for
_InterlockedCompareExchange.
*/
#ifdef _M_IX86
#if (_MSC_VER >= 1500)
#include <intrin.h>
#else
C_MODE_START
/*Visual Studio 2003 and earlier do not have prototypes for atomic intrinsics*/
LONG _InterlockedExchange (LONG volatile *Target,LONG Value);
LONG _InterlockedCompareExchange (LONG volatile *Target, LONG Value, LONG Comp);
LONG _InterlockedExchangeAdd (LONG volatile *Addend, LONG Value);
C_MODE_END
#pragma intrinsic(_InterlockedExchangeAdd)
#pragma intrinsic(_InterlockedCompareExchange)
#pragma intrinsic(_InterlockedExchange)
#endif
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedCompareExchange _InterlockedCompareExchange
/*
No need to do something special for InterlockedCompareExchangePointer
as it is a #define to InterlockedCompareExchange. The same applies to
InterlockedExchangePointer.
*/
#endif /*_M_IX86*/
#define MY_ATOMIC_MODE "msvc-intrinsics"
#define IL_EXCHG_ADD32(X,Y) InterlockedExchangeAdd((volatile LONG *)(X),(Y))
#define IL_COMP_EXCHG32(X,Y,Z) InterlockedCompareExchange((volatile LONG *)(X),(Y),(Z))
#define IL_COMP_EXCHGptr InterlockedCompareExchangePointer
#define IL_EXCHG32(X,Y) InterlockedExchange((volatile LONG *)(X),(Y))
#define IL_EXCHGptr InterlockedExchangePointer
#define make_atomic_add_body(S) \
v= IL_EXCHG_ADD ## S (a, v)
#define make_atomic_cas_body(S) \
int ## S initial_cmp= *cmp; \
int ## S initial_a= IL_COMP_EXCHG ## S (a, set, initial_cmp); \
if (!(ret= (initial_a == initial_cmp))) *cmp= initial_a;
#define make_atomic_swap_body(S) \
v= IL_EXCHG ## S (a, v)
#define make_atomic_load_body(S) \
ret= 0; /* avoid compiler warning */ \
ret= IL_COMP_EXCHG ## S (a, ret, ret);
/*
my_yield_processor (the equivalent of the x86 PAUSE instruction) should be
used to improve performance on hyperthreaded CPUs. Intel recommends using
it in spin loops on non-HT machines as well, to reduce power consumption
(see e.g. http://softwarecommunity.intel.com/articles/eng/2004.htm).
Benchmarks of spinlocks implemented with InterlockedCompareExchange and
YieldProcessor show that much better performance is achieved by calling
YieldProcessor in a loop - that is, yielding longer. On Intel boxes a
loop count in the range 200-300 gave the best results.
*/
#ifndef YIELD_LOOPS
#define YIELD_LOOPS 200
#endif
static __inline int my_yield_processor()
{
int i;
for(i=0; i<YIELD_LOOPS; i++)
{
#if (_MSC_VER <= 1310)
/* On older compilers YieldProcessor is not available, use inline assembly*/
__asm { rep nop }
#else
YieldProcessor();
#endif
}
return 1;
}
#define LF_BACKOFF my_yield_processor()
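/*
  Illustration only (hypothetical names, not part of this header): the
  spinlock shape the benchmark comment above refers to, acquiring with
  InterlockedCompareExchange and backing off with my_yield_processor(),
  i.e. roughly YIELD_LOOPS pause cycles per failed attempt.
*/
#ifdef MY_ATOMIC_SPINLOCK_SKETCH
static volatile LONG lock_word= 0;

static __inline void spin_lock_sketch(void)
{
  /* try to move lock_word 0 -> 1; on contention, yield and retry */
  while (InterlockedCompareExchange(&lock_word, 1, 0) != 0)
    my_yield_processor();
}

static __inline void spin_unlock_sketch(void)
{
  InterlockedExchange(&lock_word, 0);       /* full-barrier release store */
}
#endif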
#else /* cleanup */
#undef IL_EXCHG_ADD32
#undef IL_COMP_EXCHG32
#undef IL_COMP_EXCHGptr
#undef IL_EXCHG32
#undef IL_EXCHGptr
#endif
include/atomic/nolock.h
#ifndef ATOMIC_NOLOCK_INCLUDED
#define ATOMIC_NOLOCK_INCLUDED
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -13,43 +16,36 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#if defined(__i386__) || defined(_M_IX86) || defined(HAVE_GCC_ATOMIC_BUILTINS)
#ifdef MY_ATOMIC_MODE_DUMMY
# define LOCK ""
#else
# define LOCK "lock"
#endif
#ifdef HAVE_GCC_ATOMIC_BUILTINS
#include "gcc_builtins.h"
#elif __GNUC__
#include "x86-gcc.h"
#elif defined(_MSC_VER)
#include "x86-msvc.h"
#endif
#if defined(__i386__) || defined(_MSC_VER) || defined(__x86_64__) \
|| defined(HAVE_GCC_ATOMIC_BUILTINS)
# ifdef MY_ATOMIC_MODE_DUMMY
# define LOCK_prefix ""
# else
# define LOCK_prefix "lock"
# endif
# ifdef HAVE_GCC_ATOMIC_BUILTINS
# include "gcc_builtins.h"
# elif __GNUC__
# include "x86-gcc.h"
# elif defined(_MSC_VER)
# include "generic-msvc.h"
# endif
#elif defined(HAVE_SOLARIS_ATOMIC)
#include "solaris.h"
#endif /* __i386__ || _M_IX86 || HAVE_GCC_ATOMIC_BUILTINS */
#endif
#if defined(make_atomic_cas_body) || defined(MY_ATOMICS_MADE)
/*
* We have atomics that require no locking
*/
#define MY_ATOMIC_NOLOCK
#ifdef __SUNPRO_C
/*
* Sun Studio 12 (and likely earlier) does not accept a typedef struct {}
*/
typedef char my_atomic_rwlock_t;
#else
typedef struct { } my_atomic_rwlock_t;
#endif
/*
  Type not used so minimal size (empty struct has different size between C
  and C++, zero-length array is gcc-specific).
*/
typedef char my_atomic_rwlock_t __attribute__ ((unused));
#define my_atomic_rwlock_destroy(name)
#define my_atomic_rwlock_init(name)
#define my_atomic_rwlock_rdlock(name)
@@ -59,3 +55,4 @@ typedef struct { } my_atomic_rwlock_t;
#endif
#endif /* ATOMIC_NOLOCK_INCLUDED */
include/atomic/solaris.h
@@ -186,25 +186,25 @@ my_atomic_storeptr(void * volatile *a, void *v)
/* ------------------------------------------------------------------------ */
STATIC_INLINE int8
my_atomic_swap8(int8 volatile *a, int8 v)
my_atomic_fas8(int8 volatile *a, int8 v)
{
return ((int8) atomic_swap_8((volatile uint8_t *)a, (uint8_t)v));
}
STATIC_INLINE int16
my_atomic_swap16(int16 volatile *a, int16 v)
my_atomic_fas16(int16 volatile *a, int16 v)
{
return ((int16) atomic_swap_16((volatile uint16_t *)a, (uint16_t)v));
}
STATIC_INLINE int32
my_atomic_swap32(int32 volatile *a, int32 v)
my_atomic_fas32(int32 volatile *a, int32 v)
{
return ((int32) atomic_swap_32((volatile uint32_t *)a, (uint32_t)v));
}
STATIC_INLINE void *
my_atomic_swapptr(void * volatile *a, void *v)
my_atomic_fasptr(void * volatile *a, void *v)
{
return (atomic_swap_ptr(a, v));
}
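/*
  Illustration only (hypothetical helper): Solaris's atomic_swap_32 from
  <atomic.h> returns the previous value, which is exactly the fetch-and-store
  contract the my_atomic_fas32 wrapper above exposes.
*/
#ifdef MY_ATOMIC_SOLARIS_SKETCH
#include <atomic.h>

static uint32_t set_flag_sketch(volatile uint32_t *flag)
{
  return atomic_swap_32(flag, 1);   /* old value 0 means we set it first */
}
#endif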
include/atomic/x86-gcc.h
#ifndef ATOMIC_X86_GCC_INCLUDED
#define ATOMIC_X86_GCC_INCLUDED
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -19,10 +22,18 @@
architectures support double-word (128-bit) cas.
*/
#ifdef MY_ATOMIC_NO_XADD
#define MY_ATOMIC_MODE "gcc-x86" LOCK "-no-xadd"
#ifdef __x86_64__
# ifdef MY_ATOMIC_NO_XADD
# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix "-no-xadd"
# else
# define MY_ATOMIC_MODE "gcc-amd64" LOCK_prefix
# endif
#else
#define MY_ATOMIC_MODE "gcc-x86" LOCK
# ifdef MY_ATOMIC_NO_XADD
# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix "-no-xadd"
# else
# define MY_ATOMIC_MODE "gcc-x86" LOCK_prefix
# endif
#endif
/* fix -ansi errors while maintaining readability */
@@ -32,12 +43,12 @@
#ifndef MY_ATOMIC_NO_XADD
#define make_atomic_add_body(S) \
asm volatile (LOCK "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
asm volatile (LOCK_prefix "; xadd %0, %1;" : "+r" (v) , "+m" (*a))
#endif
#define make_atomic_swap_body(S) \
asm volatile ("; xchg %0, %1;" : "+q" (v) , "+m" (*a))
#define make_atomic_fas_body(S) \
asm volatile ("xchg %0, %1;" : "+q" (v) , "+m" (*a))
#define make_atomic_cas_body(S) \
asm volatile (LOCK "; cmpxchg %3, %0; setz %2;" \
asm volatile (LOCK_prefix "; cmpxchg %3, %0; setz %2;" \
: "+m" (*a), "+a" (*cmp), "=q" (ret): "r" (set))
#ifdef MY_ATOMIC_MODE_DUMMY
@@ -46,13 +57,17 @@
#else
/*
Actually 32-bit reads/writes are always atomic on x86
But we add LOCK here anyway to force memory barriers
But we add LOCK_prefix here anyway to force memory barriers
*/
#define make_atomic_load_body(S) \
ret=0; \
asm volatile (LOCK "; cmpxchg %2, %0" \
asm volatile (LOCK_prefix "; cmpxchg %2, %0" \
: "+m" (*a), "+a" (ret): "r" (ret))
#define make_atomic_store_body(S) \
asm volatile ("; xchg %0, %1;" : "+m" (*a) : "r" (v))
asm volatile ("; xchg %0, %1;" : "+m" (*a), "+r" (v))
#endif
/* TODO test on intel whether the below helps. on AMD it makes no difference */
//#define LF_BACKOFF ({asm volatile ("rep; nop"); 1; })
#endif /* ATOMIC_X86_GCC_INCLUDED */
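/*
  Illustration only (hypothetical name): what make_atomic_add(32) expands to
  on SMP x86 with the macros above - lock xadd atomically adds v to *a and
  leaves the old value of *a in v, which is then returned.
*/
static inline int my_atomic_add32_sketch(int volatile *a, int v)
{
  asm volatile ("lock; xadd %0, %1;" : "+r" (v), "+m" (*a));
  return v;                          /* previous value of *a */
}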
include/atomic/x86-msvc.h
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
XXX 64-bit atomic operations can be implemented using
cmpxchg8b, if necessary
*/
// Would it be better to use intrinsics ?
// (InterlockedCompareExchange, InterlockedCompareExchange16
// InterlockedExchangeAdd, InterlockedExchange)
#ifndef _atomic_h_cleanup_
#define _atomic_h_cleanup_ "atomic/x86-msvc.h"
#define MY_ATOMIC_MODE "msvc-x86" LOCK
#define make_atomic_add_body(S) \
_asm { \
_asm mov reg_ ## S, v \
_asm LOCK xadd *a, reg_ ## S \
_asm movzx v, reg_ ## S \
}
#define make_atomic_cas_body(S) \
_asm { \
_asm mov areg_ ## S, *cmp \
_asm mov reg2_ ## S, set \
_asm LOCK cmpxchg *a, reg2_ ## S \
_asm mov *cmp, areg_ ## S \
_asm setz al \
_asm movzx ret, al \
}
#define make_atomic_swap_body(S) \
_asm { \
_asm mov reg_ ## S, v \
_asm xchg *a, reg_ ## S \
_asm mov v, reg_ ## S \
}
#ifdef MY_ATOMIC_MODE_DUMMY
#define make_atomic_load_body(S) ret=*a
#define make_atomic_store_body(S) *a=v
#else
/*
Actually 32-bit reads/writes are always atomic on x86
But we add LOCK here anyway to force memory barriers
*/
#define make_atomic_load_body(S) \
_asm { \
_asm mov areg_ ## S, 0 \
_asm mov reg2_ ## S, areg_ ## S \
_asm LOCK cmpxchg *a, reg2_ ## S \
_asm mov ret, areg_ ## S \
}
#define make_atomic_store_body(S) \
_asm { \
_asm mov reg_ ## S, v \
_asm xchg *a, reg_ ## S \
}
#endif
#define reg_8 al
#define reg_16 ax
#define reg_32 eax
#define areg_8 al
#define areg_16 ax
#define areg_32 eax
#define reg2_8 bl
#define reg2_16 bx
#define reg2_32 ebx
#else /* cleanup */
#undef reg_8
#undef reg_16
#undef reg_32
#undef areg_8
#undef areg_16
#undef areg_32
#undef reg2_8
#undef reg2_16
#undef reg2_32
#endif
include/my_atomic.h
#ifndef MY_ATOMIC_INCLUDED
#define MY_ATOMIC_INCLUDED
/* Copyright (C) 2006 MySQL AB
This program is free software; you can redistribute it and/or modify
@@ -13,9 +16,51 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/*
This header defines five atomic operations:
my_atomic_add#(&var, what)
add 'what' to *var, and return the old value of *var
my_atomic_fas#(&var, what)
'Fetch And Store'
store 'what' in *var, and return the old value of *var
my_atomic_cas#(&var, &old, new)
'Compare And Swap'
if *var is equal to *old, then store 'new' in *var, and return TRUE
otherwise store *var in *old, and return FALSE
my_atomic_load#(&var)
return *var
my_atomic_store#(&var, what)
store 'what' in *var
'#' is substituted by a size suffix - 8, 16, 32, or ptr
(e.g. my_atomic_add8, my_atomic_fas32, my_atomic_casptr).
NOTE: These operations are not always atomic, so they must always be
enclosed in my_atomic_rwlock_rdlock(lock)/my_atomic_rwlock_rdunlock(lock)
or my_atomic_rwlock_wrlock(lock)/my_atomic_rwlock_wrunlock(lock).
Hint: if a code block makes intensive use of atomic ops, it makes sense
to take/release the rwlock once for the whole block, not for every statement.
On architectures where these operations are really atomic, rwlocks will
be optimized away.
8- and 16-bit atomics aren't implemented for Windows (see generic-msvc.h),
but can be added if necessary.
*/
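/*
  Usage sketch based on the comment above (hypothetical names; assumes
  my_atomic.h and the int32 typedef from my_global.h are in scope). The
  rwlock calls compile away on platforms with real atomics.
*/
#ifdef MY_ATOMIC_USAGE_SKETCH
static my_atomic_rwlock_t counter_lock;
static int32 counter;

static int32 bump_counter_sketch(void)
{
  int32 old;
  my_atomic_rwlock_wrlock(&counter_lock);
  old= my_atomic_add32(&counter, 1);   /* returns the pre-increment value */
  my_atomic_rwlock_wrunlock(&counter_lock);
  return old;
}
#endif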
#ifndef my_atomic_rwlock_init
#define intptr void *
/**
On most platforms we implement 8-bit, 16-bit, 32-bit and "pointer"
operations. Thus the symbol below is defined by default; platforms
where we leave out 8-bit or 16-bit operations should undefine it.
*/
#define MY_ATOMIC_HAS_8_16 1
#ifndef MY_ATOMIC_MODE_RWLOCKS
/*
@@ -24,51 +69,117 @@
#include "atomic/nolock.h"
#endif
#ifndef MY_ATOMIC_NOLOCK
/*
* Have to use rw-locks for atomic ops
*/
#ifndef make_atomic_cas_body
/* nolock.h was not able to generate even a CAS function, fall back */
#include "atomic/rwlock.h"
#endif
#ifndef MY_ATOMICS_MADE
#else
/* define missing functions by using the already generated ones */
#ifndef make_atomic_add_body
#define make_atomic_add_body(S) \
int ## S tmp=*a; \
while (!my_atomic_cas ## S(a, &tmp, tmp+v)); \
v=tmp;
#endif
#ifndef make_atomic_fas_body
#define make_atomic_fas_body(S) \
int ## S tmp=*a; \
while (!my_atomic_cas ## S(a, &tmp, v)); \
v=tmp;
#endif
#ifndef make_atomic_load_body
#define make_atomic_load_body(S) \
ret= 0; /* avoid compiler warning */ \
(void)(my_atomic_cas ## S(a, &ret, ret));
#endif
#ifndef make_atomic_store_body
#define make_atomic_store_body(S) \
(void)(my_atomic_fas ## S (a, v));
#endif
#endif
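/*
  Illustration only: the CAS loop that make_atomic_add_body above generates,
  written out for S=32 (hypothetical name; int32 comes from my_global.h).
  On failure my_atomic_cas32 refreshes tmp with the current *a, so the loop
  retries until no concurrent update intervenes.
*/
#ifdef MY_ATOMIC_CAS_LOOP_SKETCH
static int32 add32_via_cas_sketch(int32 volatile *a, int32 v)
{
  int32 tmp= *a;
  while (!my_atomic_cas32(a, &tmp, tmp + v))
    /* tmp now holds the value another thread stored; retry */ ;
  return tmp;                        /* the old value, like my_atomic_add32 */
}
#endif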
/*
transparent_union doesn't work in g++. A compiler bug?
Darwin's gcc doesn't want to put pointers in a transparent_union
when built with -arch ppc64. Complains:
warning: 'transparent_union' attribute ignored
*/
#if defined(__GNUC__) && !defined(__cplusplus) && \
! (defined(__APPLE__) && defined(_ARCH_PPC64))
/*
we want to be able to use my_atomic_xxx functions with
both signed and unsigned integers. But gcc will issue a warning
"passing arg N of `my_atomic_XXX' as [un]signed due to prototype"
if the signedness of the argument doesn't match the prototype, or
"pointer targets in passing argument N of my_atomic_XXX differ in signedness"
if int* is used where uint* is expected (or vice versa).
Let's shut these warnings up
*/
#define make_transparent_unions(S) \
typedef union { \
int ## S i; \
uint ## S u; \
} U_ ## S __attribute__ ((transparent_union)); \
typedef union { \
int ## S volatile *i; \
uint ## S volatile *u; \
} Uv_ ## S __attribute__ ((transparent_union));
#define uintptr intptr
make_transparent_unions(8)
make_transparent_unions(16)
make_transparent_unions(32)
make_transparent_unions(ptr)
#undef uintptr
#undef make_transparent_unions
#define a U_a.i
#define cmp U_cmp.i
#define v U_v.i
#define set U_set.i
#else
#define U_8 int8
#define U_16 int16
#define U_32 int32
#define U_ptr intptr
#define Uv_8 int8
#define Uv_16 int16
#define Uv_32 int32
#define Uv_ptr intptr
#define U_a volatile *a
#define U_cmp *cmp
#define U_v v
#define U_set set
#endif /* __GCC__ transparent_union magic */
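/*
  Standalone illustration (hypothetical names) of the transparent_union
  trick above: one prototype accepts both signed and unsigned pointers
  without warnings (gcc, C only).
*/
#ifdef MY_ATOMIC_UNION_SKETCH
typedef union {
  int volatile *i;
  unsigned volatile *u;
} int_or_uint_ptr __attribute__ ((transparent_union));

static int load_any_sketch(int_or_uint_ptr p)
{
  return *p.i;                       /* both members alias the same pointer */
}
/* callers pass either type directly: int x; unsigned y;
   load_any_sketch(&x); load_any_sketch(&y); */
#endif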
#ifdef HAVE_INLINE
#define make_atomic_cas(S) \
STATIC_INLINE int my_atomic_cas ## S(Uv_ ## S U_a, \
Uv_ ## S U_cmp, U_ ## S U_set) \
{ \
int8 ret; \
make_atomic_cas_body(S); \
return ret; \
}
#define make_atomic_add(S) \
STATIC_INLINE int ## S my_atomic_add ## S( \
int ## S volatile *a, int ## S v) \
Uv_ ## S U_a, U_ ## S U_v) \
{ \
make_atomic_add_body(S); \
return v; \
}
#define make_atomic_swap(S) \
STATIC_INLINE int ## S my_atomic_swap ## S( \
int ## S volatile *a, int ## S v) \
#define make_atomic_fas(S) \
STATIC_INLINE int ## S my_atomic_fas ## S( \
Uv_ ## S U_a, U_ ## S U_v) \
{ \
make_atomic_swap_body(S); \
make_atomic_fas_body(S); \
return v; \
}
#define make_atomic_cas(S) \
STATIC_INLINE int my_atomic_cas ## S(int ## S volatile *a, \
int ## S *cmp, int ## S set) \
{ \
int8 ret; \
make_atomic_cas_body(S); \
return ret; \
}
#define make_atomic_load(S) \
STATIC_INLINE int ## S my_atomic_load ## S(int ## S volatile *a) \
STATIC_INLINE int ## S my_atomic_load ## S(Uv_ ## S U_a) \
{ \
int ## S ret; \
make_atomic_load_body(S); \
@@ -77,7 +188,7 @@ STATIC_INLINE int ## S my_atomic_load ## S(int ## S volatile *a) \
#define make_atomic_store(S) \
STATIC_INLINE void my_atomic_store ## S( \
int ## S volatile *a, int ## S v) \
Uv_ ## S U_a, U_ ## S U_v) \
{ \
make_atomic_store_body(S); \
}
@@ -85,63 +196,96 @@ STATIC_INLINE void my_atomic_store ## S( \
#else /* no inline functions */
#define make_atomic_add(S) \
extern int ## S my_atomic_add ## S(int ## S volatile *a, int ## S v);
extern int ## S my_atomic_add ## S(Uv_ ## S U_a, U_ ## S U_v);
#define make_atomic_swap(S) \
extern int ## S my_atomic_swap ## S(int ## S volatile *a, int ## S v);
#define make_atomic_fas(S) \
extern int ## S my_atomic_fas ## S(Uv_ ## S U_a, U_ ## S U_v);
#define make_atomic_cas(S) \
extern int my_atomic_cas ## S(int ## S volatile *a, int ## S *cmp, int ## S set);
extern int my_atomic_cas ## S(Uv_ ## S U_a, Uv_ ## S U_cmp, U_ ## S U_set);
#define make_atomic_load(S) \
extern int ## S my_atomic_load ## S(int ## S volatile *a);
extern int ## S my_atomic_load ## S(Uv_ ## S U_a);
#define make_atomic_store(S) \
extern void my_atomic_store ## S(int ## S volatile *a, int ## S v);
extern void my_atomic_store ## S(Uv_ ## S U_a, U_ ## S U_v);
#endif /* HAVE_INLINE */
make_atomic_cas( 8)
#ifdef MY_ATOMIC_HAS_8_16
make_atomic_cas(8)
make_atomic_cas(16)
#endif
make_atomic_cas(32)
make_atomic_cas(ptr)
make_atomic_add( 8)
#ifdef MY_ATOMIC_HAS_8_16
make_atomic_add(8)
make_atomic_add(16)
#endif
make_atomic_add(32)
make_atomic_load( 8)
#ifdef MY_ATOMIC_HAS_8_16
make_atomic_load(8)
make_atomic_load(16)
#endif
make_atomic_load(32)
make_atomic_load(ptr)
make_atomic_store( 8)
#ifdef MY_ATOMIC_HAS_8_16
make_atomic_fas(8)
make_atomic_fas(16)
#endif
make_atomic_fas(32)
make_atomic_fas(ptr)
#ifdef MY_ATOMIC_HAS_8_16
make_atomic_store(8)
make_atomic_store(16)
#endif
make_atomic_store(32)
make_atomic_store(ptr)
make_atomic_swap( 8)
make_atomic_swap(16)
make_atomic_swap(32)
make_atomic_swap(ptr)
#ifdef _atomic_h_cleanup_
#include _atomic_h_cleanup_
#undef _atomic_h_cleanup_
#endif
#undef U_8
#undef U_16
#undef U_32
#undef U_ptr
#undef Uv_8
#undef Uv_16
#undef Uv_32
#undef Uv_ptr
#undef a
#undef cmp
#undef v
#undef set
#undef U_a
#undef U_cmp
#undef U_v
#undef U_set
#undef make_atomic_add
#undef make_atomic_cas
#undef make_atomic_load
#undef make_atomic_store
#undef make_atomic_swap
#undef make_atomic_fas
#undef make_atomic_add_body
#undef make_atomic_cas_body
#undef make_atomic_load_body
#undef make_atomic_store_body
#undef make_atomic_swap_body
#undef make_atomic_fas_body
#undef intptr
#endif /* MY_ATOMICS_MADE */
#ifdef _atomic_h_cleanup_
#include _atomic_h_cleanup_
#undef _atomic_h_cleanup_
/*
The macro below defines (as an expression) the code that
will be run in spin-loops. Intel manuals recommend having PAUSE there.
It is expected to be defined in the include/atomic/ *.h files.
*/
#ifndef LF_BACKOFF
#define LF_BACKOFF (1)
#endif
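/*
  Usage sketch (hypothetical variables): LF_BACKOFF is meant to sit inside
  lock-free retry loops, e.g.
    while (!my_atomic_cas32(&slot, &old_val, new_val))
      LF_BACKOFF;
  It stays the no-op (1) unless an atomic/ *.h implementation above defined
  a real pause, as generic-msvc.h does.
*/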
#define MY_ATOMIC_OK 0
@@ -150,3 +294,4 @@ extern int my_atomic_initialize();
#endif
#endif /* MY_ATOMIC_INCLUDED */