Commit 6fd7b63c authored by calvin's avatar calvin

branches/zip: Use the Windows Interlocked functions for atomic memory access

Mapping the atomic operations to Windows Interlocked functions:

os_compare_and_swap_* to InterlockedCompareExchange(64)
os_atomic_increment_* to InterlockedExchangeAdd(64)
os_atomic_test_and_set_byte to InterlockedExchange

In this patch, the legacy code under UNIV_CAN_USE_X86_ASSEMBLER is
removed altogether, and HAVE_WINDOWS_ATOMICS and
INNODB_RW_LOCKS_USE_ATOMICS are added to CMakeLists.txt.

This is to address Mantis issue #194.

rb://113

Approved by: Marko
parent 124945b4
...@@ -101,7 +101,9 @@ SET(INNODB_SOURCES btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c ...@@ -101,7 +101,9 @@ SET(INNODB_SOURCES btr/btr0btr.c btr/btr0cur.c btr/btr0pcur.c btr/btr0sea.c
ut/ut0list.c ut/ut0wqueue.c) ut/ut0list.c ut/ut0wqueue.c)
IF(NOT SOURCE_SUBLIBS) IF(NOT SOURCE_SUBLIBS)
ADD_DEFINITIONS(-D_WIN32) # INNODB_RW_LOCKS_USE_ATOMICS may be defined only if HAVE_WINDOWS_ATOMICS is defined.
# Windows Interlocked functions require Windows 2000 or newer operating system
ADD_DEFINITIONS(-D_WIN32 -DHAVE_WINDOWS_ATOMICS -DINNODB_RW_LOCKS_USE_ATOMICS)
ADD_LIBRARY(innobase STATIC ${INNODB_SOURCES}) ADD_LIBRARY(innobase STATIC ${INNODB_SOURCES})
# Require mysqld_error.h, which is built as part of the GenError # Require mysqld_error.h, which is built as part of the GenError
ADD_DEPENDENCIES(innobase GenError) ADD_DEPENDENCIES(innobase GenError)
......
...@@ -347,6 +347,39 @@ amount of increment. */ ...@@ -347,6 +347,39 @@ amount of increment. */
Returns the old value of *ptr, atomically sets *ptr to new_val */ Returns the old value of *ptr, atomically sets *ptr to new_val */
# define os_atomic_test_and_set_byte(ptr, new_val) \ # define os_atomic_test_and_set_byte(ptr, new_val) \
atomic_swap_uchar(ptr, new_val) atomic_swap_uchar(ptr, new_val)
/* On Windows, use Windows atomics / interlocked */
#elif defined(HAVE_WINDOWS_ATOMICS)
# ifdef _WIN64
# define win_cmp_and_xchg InterlockedCompareExchange64
# define win_xchg_and_add InterlockedExchangeAdd64
# else /* _WIN64 */
# define win_cmp_and_xchg InterlockedCompareExchange
# define win_xchg_and_add InterlockedExchangeAdd
# endif
/**************************************************************
Returns true if swapped, ptr is pointer to target, old_val is value to
compare to, new_val is the value to swap in. */
# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
(win_cmp_and_xchg(ptr, new_val, old_val) == old_val)
# define os_compare_and_swap_lint(ptr, old_val, new_val) \
(win_cmp_and_xchg(ptr, new_val, old_val) == old_val)
# ifdef INNODB_RW_LOCKS_USE_ATOMICS
# define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
(InterlockedCompareExchange(ptr, new_val, old_val) == old_val)
# endif /* INNODB_RW_LOCKS_USE_ATOMICS */
/**************************************************************
Returns the resulting value, ptr is pointer to target, amount is the
amount of increment. */
# define os_atomic_increment_lint(ptr, amount) \
(win_xchg_and_add(ptr, amount) + amount)
# define os_atomic_increment_ulint(ptr, amount) \
((ulint) (win_xchg_and_add(ptr, amount) + amount))
/**************************************************************
Returns the old value of *ptr, atomically sets *ptr to new_val.
InterlockedExchange() operates on LONG, and the LONG will be
clobbered */
# define os_atomic_test_and_set_byte(ptr, new_val) \
((byte) InterlockedExchange(ptr, new_val))
#endif /* HAVE_GCC_ATOMIC_BUILTINS */ #endif /* HAVE_GCC_ATOMIC_BUILTINS */
#ifndef UNIV_NONINL #ifndef UNIV_NONINL
......
...@@ -42,6 +42,13 @@ Created 9/5/1995 Heikki Tuuri ...@@ -42,6 +42,13 @@ Created 9/5/1995 Heikki Tuuri
extern my_bool timed_mutexes; extern my_bool timed_mutexes;
#ifdef HAVE_WINDOWS_ATOMICS
typedef LONG lock_word_t; /* On Windows, InterlockedExchange operates
on LONG variable */
#else
typedef byte lock_word_t;
#endif
/********************************************************************** /**********************************************************************
Initializes the synchronization data structures. */ Initializes the synchronization data structures. */
UNIV_INTERN UNIV_INTERN
...@@ -258,7 +265,7 @@ mutex_n_reserved(void); ...@@ -258,7 +265,7 @@ mutex_n_reserved(void);
NOT to be used outside this module except in debugging! Gets the value NOT to be used outside this module except in debugging! Gets the value
of the lock word. */ of the lock word. */
UNIV_INLINE UNIV_INLINE
byte lock_word_t
mutex_get_lock_word( mutex_get_lock_word(
/*================*/ /*================*/
const mutex_t* mutex); /* in: mutex */ const mutex_t* mutex); /* in: mutex */
...@@ -484,16 +491,14 @@ implementation of a mutual exclusion semaphore. */ ...@@ -484,16 +491,14 @@ implementation of a mutual exclusion semaphore. */
struct mutex_struct { struct mutex_struct {
os_event_t event; /* Used by sync0arr.c for the wait queue */ os_event_t event; /* Used by sync0arr.c for the wait queue */
byte lock_word; /* This byte is the target of the atomic volatile lock_word_t lock_word; /* lock_word is the target
test-and-set instruction in Win32 and of the atomic test-and-set instruction when
when atomic operations are enabled on other atomic operations are enabled. */
platforms. */
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER) #if !defined(HAVE_ATOMIC_BUILTINS)
#elif defined(HAVE_ATOMIC_BUILTINS)
#else
os_fast_mutex_t os_fast_mutex_t
os_fast_mutex; /* In other systems we use this OS mutex os_fast_mutex; /* We use this OS mutex in place of lock_word
in place of lock_word */ when atomic operations are not enabled */
#endif #endif
ulint waiters; /* This ulint is set to 1 if there are (or ulint waiters; /* This ulint is set to 1 if there are (or
may be) threads waiting in the global wait may be) threads waiting in the global wait
......
...@@ -79,39 +79,7 @@ mutex_test_and_set( ...@@ -79,39 +79,7 @@ mutex_test_and_set(
1 */ 1 */
mutex_t* mutex) /* in: mutex */ mutex_t* mutex) /* in: mutex */
{ {
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER) #if defined(HAVE_ATOMIC_BUILTINS)
byte res;
byte* lw; /* assembler code is used to ensure that
lock_word is loaded from memory */
ut_ad(mutex);
ut_ad(sizeof(byte) == 1);
lw = &(mutex->lock_word);
__asm MOV ECX, lw
__asm MOV EDX, 1
__asm XCHG DL, BYTE PTR [ECX]
__asm MOV res, DL
/* The fence below would prevent this thread from
reading the data structure protected by the mutex
before the test-and-set operation is committed, but
the fence is apparently not needed:
In a posting to comp.arch newsgroup (August 10, 1997)
Andy Glew said that in P6 a LOCKed instruction like
XCHG establishes a fence with respect to memory reads
and writes and thus an explicit fence is not
needed. In P5 he seemed to agree with a previous
newsgroup poster that LOCKed instructions serialize
all instruction execution, and, consequently, also
memory operations. This is confirmed in Intel Software
Dev. Manual, Vol. 3. */
/* mutex_fence(); */
return(res);
#elif defined(HAVE_ATOMIC_BUILTINS)
return(os_atomic_test_and_set_byte(&mutex->lock_word, 1)); return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
#else #else
ibool ret; ibool ret;
...@@ -139,17 +107,7 @@ mutex_reset_lock_word( ...@@ -139,17 +107,7 @@ mutex_reset_lock_word(
/*==================*/ /*==================*/
mutex_t* mutex) /* in: mutex */ mutex_t* mutex) /* in: mutex */
{ {
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER) #if defined(HAVE_ATOMIC_BUILTINS)
byte* lw; /* assembler code is used to ensure that
lock_word is loaded from memory */
ut_ad(mutex);
lw = &(mutex->lock_word);
__asm MOV EDX, 0
__asm MOV ECX, lw
__asm XCHG DL, BYTE PTR [ECX]
#elif defined(HAVE_ATOMIC_BUILTINS)
/* In theory __sync_lock_release should be used to release the lock. /* In theory __sync_lock_release should be used to release the lock.
Unfortunately, it does not work properly alone. The workaround is Unfortunately, it does not work properly alone. The workaround is
that more conservative __sync_lock_test_and_set is used instead. */ that more conservative __sync_lock_test_and_set is used instead. */
...@@ -164,18 +122,14 @@ mutex_reset_lock_word( ...@@ -164,18 +122,14 @@ mutex_reset_lock_word(
/********************************************************************** /**********************************************************************
Gets the value of the lock word. */ Gets the value of the lock word. */
UNIV_INLINE UNIV_INLINE
byte lock_word_t
mutex_get_lock_word( mutex_get_lock_word(
/*================*/ /*================*/
const mutex_t* mutex) /* in: mutex */ const mutex_t* mutex) /* in: mutex */
{ {
const volatile byte* ptr; /* declared volatile to ensure that
lock_word is loaded from memory */
ut_ad(mutex); ut_ad(mutex);
ptr = &(mutex->lock_word); return(mutex->lock_word);
return(*ptr);
} }
/********************************************************************** /**********************************************************************
......
...@@ -70,9 +70,10 @@ the virtual method table (vtable) in GCC 3. */ ...@@ -70,9 +70,10 @@ the virtual method table (vtable) in GCC 3. */
# include <windows.h> # include <windows.h>
# if !defined(WIN64) && !defined(_WIN64) # if defined(HAVE_WINDOWS_ATOMICS)
# define UNIV_CAN_USE_X86_ASSEMBLER /* If atomics are defined we use them in InnoDB mutex implementation */
# endif # define HAVE_ATOMIC_BUILTINS
# endif /* HAVE_WINDOWS_ATOMICS */
# ifdef _NT_ # ifdef _NT_
# define __NT__ # define __NT__
...@@ -106,17 +107,12 @@ if we are compiling on Windows. */ ...@@ -106,17 +107,12 @@ if we are compiling on Windows. */
# include <sched.h> # include <sched.h>
# endif # endif
/* When compiling for Itanium IA64, undefine the flag below to prevent use # if defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_SOLARIS_ATOMICS) \
of the 32-bit x86 assembler in mutex operations. */ || defined(HAVE_WINDOWS_ATOMICS)
# if defined(__WIN__) && !defined(WIN64) && !defined(_WIN64)
# define UNIV_CAN_USE_X86_ASSEMBLER
# endif
# if defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_SOLARIS_ATOMICS)
/* If atomics are defined we use them in InnoDB mutex implementation */ /* If atomics are defined we use them in InnoDB mutex implementation */
# define HAVE_ATOMIC_BUILTINS # define HAVE_ATOMIC_BUILTINS
# endif /* (HAVE_GCC_ATOMIC_BUILTINS) || (HAVE_SOLARIS_ATOMICS) */ # endif /* (HAVE_GCC_ATOMIC_BUILTINS) || (HAVE_SOLARIS_ATOMICS)
|| (HAVE_WINDOWS_ATOMICS) */
/* For InnoDB rw_locks to work with atomics we need the thread_id /* For InnoDB rw_locks to work with atomics we need the thread_id
to be no more than machine word wide. The following enables using to be no more than machine word wide. The following enables using
......
...@@ -1093,6 +1093,14 @@ innobase_start_or_create_for_mysql(void) ...@@ -1093,6 +1093,14 @@ innobase_start_or_create_for_mysql(void)
fprintf(stderr, fprintf(stderr,
"InnoDB: Mutexes use Solaris atomic functions.\n"); "InnoDB: Mutexes use Solaris atomic functions.\n");
# endif /* INNODB_RW_LOCKS_USE_ATOMICS */ # endif /* INNODB_RW_LOCKS_USE_ATOMICS */
#elif HAVE_WINDOWS_ATOMICS
# ifdef INNODB_RW_LOCKS_USE_ATOMICS
fprintf(stderr,
"InnoDB: Mutexes and rw_locks use Windows interlocked functions.\n");
# else
fprintf(stderr,
"InnoDB: Mutexes use Windows interlocked functions.\n");
# endif /* INNODB_RW_LOCKS_USE_ATOMICS */
#else /* HAVE_GCC_ATOMIC_BUILTINS */ #else /* HAVE_GCC_ATOMIC_BUILTINS */
fprintf(stderr, fprintf(stderr,
"InnoDB: Neither mutexes nor rw_locks use GCC atomic builtins.\n"); "InnoDB: Neither mutexes nor rw_locks use GCC atomic builtins.\n");
......
...@@ -235,9 +235,7 @@ mutex_create_func( ...@@ -235,9 +235,7 @@ mutex_create_func(
const char* cfile_name, /* in: file name where created */ const char* cfile_name, /* in: file name where created */
ulint cline) /* in: file line where created */ ulint cline) /* in: file line where created */
{ {
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER) #if defined(HAVE_ATOMIC_BUILTINS)
mutex_reset_lock_word(mutex);
#elif defined(HAVE_ATOMIC_BUILTINS)
mutex_reset_lock_word(mutex); mutex_reset_lock_word(mutex);
#else #else
os_fast_mutex_init(&(mutex->os_fast_mutex)); os_fast_mutex_init(&(mutex->os_fast_mutex));
...@@ -327,9 +325,7 @@ mutex_free( ...@@ -327,9 +325,7 @@ mutex_free(
os_event_free(mutex->event); os_event_free(mutex->event);
#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER) #if !defined(HAVE_ATOMIC_BUILTINS)
#elif defined(HAVE_ATOMIC_BUILTINS)
#else
os_fast_mutex_free(&(mutex->os_fast_mutex)); os_fast_mutex_free(&(mutex->os_fast_mutex));
#endif #endif
/* If we free the mutex protecting the mutex list (freeing is /* If we free the mutex protecting the mutex list (freeing is
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment