Commit f1672978 authored by Dmitriy Vyukov

runtime/race: better handling of atomic operations

This change fixes the last known false negative of the race detector --
detection of races between mutating atomic operations and non-atomic operations.
The race runtime already has functions for precise modelling of the various atomic operations,
so this change simply forwards all atomic ops to the race runtime
instead of the poor man's modeling in the sync/atomic package.
Performance is also improved -- full sync/atomic tests run in 60s instead of 85s now.

LGTM=khr
R=golang-codereviews, khr
CC=golang-codereviews, rsc
https://golang.org/cl/111310044
parent 55337807
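For illustration only (not part of the commit): a minimal sketch of the kind of program the detector can now report, modeled on the TestRaceAtomicStoreLoad test updated below. The package and file names are hypothetical; run with `go test -race`.

// racedemo_test.go (hypothetical)
package racedemo

import (
	"sync/atomic"
	"testing"
)

func TestAtomicStoreVsPlainLoad(t *testing.T) {
	c := make(chan bool)
	var a uint64
	go func() {
		atomic.StoreUint64(&a, 1) // atomic write, now forwarded to the race runtime
		c <- true
	}()
	_ = a // plain, non-atomic read: races with the atomic store above
	<-c
}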
@@ -24,6 +24,8 @@ void __tsan_malloc(void);
void __tsan_acquire(void);
void __tsan_release(void);
void __tsan_release_merge(void);
void __tsan_go_ignore_sync_begin(void);
void __tsan_go_ignore_sync_end(void);
// Mimic what cmd/cgo would do.
#pragma cgo_import_static __tsan_init
@@ -36,6 +38,8 @@ void __tsan_release_merge(void);
#pragma cgo_import_static __tsan_acquire
#pragma cgo_import_static __tsan_release
#pragma cgo_import_static __tsan_release_merge
#pragma cgo_import_static __tsan_go_ignore_sync_begin
#pragma cgo_import_static __tsan_go_ignore_sync_end
// These are called from race_amd64.s.
#pragma cgo_import_static __tsan_read
@@ -47,6 +51,17 @@ void __tsan_release_merge(void);
#pragma cgo_import_static __tsan_func_enter
#pragma cgo_import_static __tsan_func_exit
#pragma cgo_import_static __tsan_go_atomic32_load
#pragma cgo_import_static __tsan_go_atomic64_load
#pragma cgo_import_static __tsan_go_atomic32_store
#pragma cgo_import_static __tsan_go_atomic64_store
#pragma cgo_import_static __tsan_go_atomic32_exchange
#pragma cgo_import_static __tsan_go_atomic64_exchange
#pragma cgo_import_static __tsan_go_atomic32_fetch_add
#pragma cgo_import_static __tsan_go_atomic64_fetch_add
#pragma cgo_import_static __tsan_go_atomic32_compare_exchange
#pragma cgo_import_static __tsan_go_atomic64_compare_exchange
extern byte runtime·noptrdata[];
extern byte runtime·enoptrbss[];
@@ -250,32 +265,20 @@ runtime·RaceReleaseMerge(void *addr)
runtime·racereleasemerge(addr);
}
// func RaceSemacquire(s *uint32)
void
runtime·RaceSemacquire(uint32 *s)
{
runtime·semacquire(s, false);
}
// func RaceSemrelease(s *uint32)
void
runtime·RaceSemrelease(uint32 *s)
{
runtime·semrelease(s);
}
// func RaceDisable()
void
runtime·RaceDisable(void)
{
g->raceignore++;
if(g->raceignore++ == 0)
runtime·racecall(__tsan_go_ignore_sync_begin, g->racectx);
}
// func RaceEnable()
void
runtime·RaceEnable(void)
{
g->raceignore--;
if(--g->raceignore == 0)
runtime·racecall(__tsan_go_ignore_sync_end, g->racectx);
}
typedef struct SymbolizeContext SymbolizeContext;
...
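A hedged usage sketch (not part of the CL) of the reference-counted RaceDisable/RaceEnable change above: only the outermost pair toggles TSan's ignore state; nested calls just adjust the per-goroutine counter. These exported hooks are only available in race-enabled builds (go build -race).

package main

import "runtime"

func main() {
	runtime.RaceDisable() // counter 0 -> 1: the runtime issues __tsan_go_ignore_sync_begin
	runtime.RaceDisable() // counter 1 -> 2: nested call, no further call into TSan
	// ... code inside the ignored region ...
	runtime.RaceEnable() // counter 2 -> 1: still inside the ignored region
	runtime.RaceEnable() // counter 1 -> 0: the runtime issues __tsan_go_ignore_sync_end
}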
@@ -9,4 +9,4 @@ $ ./buildgo.sh
Tested with gcc 4.6.1 and 4.7.0. On Windows it's built with 64-bit MinGW.
Current runtime is built on rev 210365.
Current runtime is built on rev 215000.
@@ -225,8 +225,7 @@ func TestNoRaceAtomicStoreCASUint64(t *testing.T) {
x = 1
}
// Races with non-atomic loads are not detected.
func TestRaceFailingAtomicStoreLoad(t *testing.T) {
func TestRaceAtomicStoreLoad(t *testing.T) {
c := make(chan bool)
var a uint64
go func() {
@@ -248,8 +247,7 @@ func TestRaceAtomicLoadStore(t *testing.T) {
<-c
}
// Races with non-atomic loads are not detected.
func TestRaceFailingAtomicAddLoad(t *testing.T) {
func TestRaceAtomicAddLoad(t *testing.T) {
c := make(chan bool)
var a uint64
go func() {
...
@@ -79,7 +79,7 @@ TEXT runtime·RaceWrite(SB), NOSPLIT, $0-8
TEXT runtime·racewritepc(SB), NOSPLIT, $0-24
MOVQ addr+0(FP), RARG1
MOVQ callpc+8(FP), RARG2
MOVQ cp+16(FP), RARG3
MOVQ pc+16(FP), RARG3
// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
MOVQ $__tsan_write_pc(SB), AX
JMP racecalladdr<>(SB)
@@ -180,6 +180,141 @@ TEXT runtime·racefuncexit(SB), NOSPLIT, $0-0
MOVQ $__tsan_func_exit(SB), AX
JMP racecall<>(SB)
// Atomic operations for sync/atomic package.
// Load
TEXT syncatomic·LoadInt32(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic32_load(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·LoadInt64(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic64_load(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·LoadUint32(SB), NOSPLIT, $0-0
JMP syncatomic·LoadInt32(SB)
TEXT syncatomic·LoadUint64(SB), NOSPLIT, $0-0
JMP syncatomic·LoadInt64(SB)
TEXT syncatomic·LoadUintptr(SB), NOSPLIT, $0-0
JMP syncatomic·LoadInt64(SB)
TEXT syncatomic·LoadPointer(SB), NOSPLIT, $0-0
JMP syncatomic·LoadInt64(SB)
// Store
TEXT syncatomic·StoreInt32(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic32_store(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·StoreInt64(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic64_store(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·StoreUint32(SB), NOSPLIT, $0-0
JMP syncatomic·StoreInt32(SB)
TEXT syncatomic·StoreUint64(SB), NOSPLIT, $0-0
JMP syncatomic·StoreInt64(SB)
TEXT syncatomic·StoreUintptr(SB), NOSPLIT, $0-0
JMP syncatomic·StoreInt64(SB)
TEXT syncatomic·StorePointer(SB), NOSPLIT, $0-0
JMP syncatomic·StoreInt64(SB)
// Swap
TEXT syncatomic·SwapInt32(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic32_exchange(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·SwapInt64(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic64_exchange(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·SwapUint32(SB), NOSPLIT, $0-0
JMP syncatomic·SwapInt32(SB)
TEXT syncatomic·SwapUint64(SB), NOSPLIT, $0-0
JMP syncatomic·SwapInt64(SB)
TEXT syncatomic·SwapUintptr(SB), NOSPLIT, $0-0
JMP syncatomic·SwapInt64(SB)
TEXT syncatomic·SwapPointer(SB), NOSPLIT, $0-0
JMP syncatomic·SwapInt64(SB)
// Add
TEXT syncatomic·AddInt32(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic32_fetch_add(SB), AX
CALL racecallatomic<>(SB)
MOVL add+8(FP), AX // convert fetch_add to add_fetch
ADDL AX, ret+16(FP)
RET
TEXT syncatomic·AddInt64(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic64_fetch_add(SB), AX
CALL racecallatomic<>(SB)
MOVQ add+8(FP), AX // convert fetch_add to add_fetch
ADDQ AX, ret+16(FP)
RET
TEXT syncatomic·AddUint32(SB), NOSPLIT, $0-0
JMP syncatomic·AddInt32(SB)
TEXT syncatomic·AddUint64(SB), NOSPLIT, $0-0
JMP syncatomic·AddInt64(SB)
TEXT syncatomic·AddUintptr(SB), NOSPLIT, $0-0
JMP syncatomic·AddInt64(SB)
TEXT syncatomic·AddPointer(SB), NOSPLIT, $0-0
JMP syncatomic·AddInt64(SB)
// CompareAndSwap
TEXT syncatomic·CompareAndSwapInt32(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic32_compare_exchange(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·CompareAndSwapInt64(SB), NOSPLIT, $0-0
MOVQ $__tsan_go_atomic64_compare_exchange(SB), AX
CALL racecallatomic<>(SB)
RET
TEXT syncatomic·CompareAndSwapUint32(SB), NOSPLIT, $0-0
JMP syncatomic·CompareAndSwapInt32(SB)
TEXT syncatomic·CompareAndSwapUint64(SB), NOSPLIT, $0-0
JMP syncatomic·CompareAndSwapInt64(SB)
TEXT syncatomic·CompareAndSwapUintptr(SB), NOSPLIT, $0-0
JMP syncatomic·CompareAndSwapInt64(SB)
TEXT syncatomic·CompareAndSwapPointer(SB), NOSPLIT, $0-0
JMP syncatomic·CompareAndSwapInt64(SB)
// Generic atomic operation implementation.
// AX already contains target function.
TEXT racecallatomic<>(SB), NOSPLIT, $0-0
// Trigger SIGSEGV early.
MOVQ 16(SP), R12
MOVL (R12), R12
get_tls(R12)
MOVQ g(R12), R14
MOVQ g_racectx(R14), RARG0 // goroutine context
MOVQ 8(SP), RARG1 // caller pc
MOVQ (SP), RARG2 // pc
LEAQ 16(SP), RARG3 // arguments
JMP racecall<>(SB)
// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
...
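A hedged Go sketch (illustration only, hypothetical names) of the return-value conversion performed by the ADDL/ADDQ instructions after CALL racecallatomic in the AddInt32/AddInt64 wrappers above: TSan's fetch_add entry points hand back the value the word held before the add, while sync/atomic's Add functions must return the value after the add.

package main

import "fmt"

// fetchAdd32 is a plain, non-atomic stand-in for __tsan_go_atomic32_fetch_add:
// it adds delta to *addr and returns the value the word held before the add.
func fetchAdd32(addr *int32, delta int32) (old int32) {
	old = *addr
	*addr = old + delta
	return old
}

// addFetch32 mirrors what the assembly wrapper does after the call:
// add delta to the returned pre-add value to get the post-add result.
func addFetch32(addr *int32, delta int32) (new int32) {
	return fetchAdd32(addr, delta) + delta
}

func main() {
	var x int32 = 40
	fmt.Println(addFetch32(&x, 2)) // 42, matching sync/atomic.AddInt32 semantics
}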
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !race
// Package atomic provides low-level atomic memory primitives
// useful for implementing synchronization algorithms.
//
...
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
package atomic
import (
"runtime"
"unsafe"
)
// We use runtime.RaceRead() inside of atomic operations to catch races
// between atomic and non-atomic operations. It will also catch races
// between Mutex.Lock() and mutex overwrite (mu = Mutex{}). Since we use
// only RaceRead() we won't catch races with non-atomic loads.
// Otherwise (if we use RaceWrite()) we will report races
// between atomic operations (false positives).
var mtx uint32 = 1 // same for all
func SwapInt32(addr *int32, new int32) (old int32) {
return int32(SwapUint32((*uint32)(unsafe.Pointer(addr)), uint32(new)))
}
func SwapUint32(addr *uint32, new uint32) (old uint32) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
old = *addr
*addr = new
runtime.RaceReleaseMerge(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
return
}
func SwapInt64(addr *int64, new int64) (old int64) {
return int64(SwapUint64((*uint64)(unsafe.Pointer(addr)), uint64(new)))
}
func SwapUint64(addr *uint64, new uint64) (old uint64) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
old = *addr
*addr = new
runtime.RaceReleaseMerge(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
return
}
func SwapUintptr(addr *uintptr, new uintptr) (old uintptr) {
return uintptr(SwapPointer((*unsafe.Pointer)(unsafe.Pointer(addr)), unsafe.Pointer(new)))
}
func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
old = *addr
*addr = new
runtime.RaceReleaseMerge(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
return
}
func CompareAndSwapInt32(val *int32, old, new int32) bool {
return CompareAndSwapUint32((*uint32)(unsafe.Pointer(val)), uint32(old), uint32(new))
}
func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) {
_ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
if *val == old {
*val = new
swapped = true
runtime.RaceReleaseMerge(unsafe.Pointer(val))
}
runtime.RaceSemrelease(&mtx)
return
}
func CompareAndSwapInt64(val *int64, old, new int64) bool {
return CompareAndSwapUint64((*uint64)(unsafe.Pointer(val)), uint64(old), uint64(new))
}
func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) {
_ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
if *val == old {
*val = new
swapped = true
runtime.RaceReleaseMerge(unsafe.Pointer(val))
}
runtime.RaceSemrelease(&mtx)
return
}
func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) {
_ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
if *val == old {
*val = new
swapped = true
runtime.RaceReleaseMerge(unsafe.Pointer(val))
}
runtime.RaceSemrelease(&mtx)
return
}
func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) {
_ = *val
swapped = false
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
if *val == old {
*val = new
swapped = true
runtime.RaceReleaseMerge(unsafe.Pointer(val))
}
runtime.RaceSemrelease(&mtx)
return
}
func AddInt32(val *int32, delta int32) int32 {
return int32(AddUint32((*uint32)(unsafe.Pointer(val)), uint32(delta)))
}
func AddUint32(val *uint32, delta uint32) (new uint32) {
_ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
*val = *val + delta
new = *val
runtime.RaceReleaseMerge(unsafe.Pointer(val))
runtime.RaceSemrelease(&mtx)
return
}
func AddInt64(val *int64, delta int64) int64 {
return int64(AddUint64((*uint64)(unsafe.Pointer(val)), uint64(delta)))
}
func AddUint64(val *uint64, delta uint64) (new uint64) {
_ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
*val = *val + delta
new = *val
runtime.RaceReleaseMerge(unsafe.Pointer(val))
runtime.RaceSemrelease(&mtx)
return
}
func AddUintptr(val *uintptr, delta uintptr) (new uintptr) {
_ = *val
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(val))
runtime.RaceAcquire(unsafe.Pointer(val))
*val = *val + delta
new = *val
runtime.RaceReleaseMerge(unsafe.Pointer(val))
runtime.RaceSemrelease(&mtx)
return
}
func LoadInt32(addr *int32) int32 {
return int32(LoadUint32((*uint32)(unsafe.Pointer(addr))))
}
func LoadUint32(addr *uint32) (val uint32) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
val = *addr
runtime.RaceSemrelease(&mtx)
return
}
func LoadInt64(addr *int64) int64 {
return int64(LoadUint64((*uint64)(unsafe.Pointer(addr))))
}
func LoadUint64(addr *uint64) (val uint64) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
val = *addr
runtime.RaceSemrelease(&mtx)
return
}
func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
val = *addr
runtime.RaceSemrelease(&mtx)
return
}
func LoadUintptr(addr *uintptr) (val uintptr) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
runtime.RaceAcquire(unsafe.Pointer(addr))
val = *addr
runtime.RaceSemrelease(&mtx)
return
}
func StoreInt32(addr *int32, val int32) {
StoreUint32((*uint32)(unsafe.Pointer(addr)), uint32(val))
}
func StoreUint32(addr *uint32, val uint32) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
runtime.RaceRelease(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
}
func StoreInt64(addr *int64, val int64) {
StoreUint64((*uint64)(unsafe.Pointer(addr)), uint64(val))
}
func StoreUint64(addr *uint64, val uint64) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
runtime.RaceRelease(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
}
func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
runtime.RaceRelease(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
}
func StoreUintptr(addr *uintptr, val uintptr) {
_ = *addr
runtime.RaceSemacquire(&mtx)
runtime.RaceRead(unsafe.Pointer(addr))
*addr = val
runtime.RaceRelease(unsafe.Pointer(addr))
runtime.RaceSemrelease(&mtx)
}
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build race
// This file is here only to allow external functions.
// The operations are implemented in src/pkg/runtime/race_amd64.s
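The empty assembly file enables a standard toolchain trick, sketched below with a hypothetical package: once a package contains any .s file, the compiler accepts Go function declarations without bodies, and the linker resolves them against symbols defined elsewhere (here, the wrappers added to race_amd64.s above).

// decl.go in a hypothetical package "asmadd" (illustration only).
// Because the package also contains an assembly file (even an empty one, like
// race.s above), the compiler accepts a declaration without a body; the symbol
// must then be provided in assembly, so this snippet links only when such an
// implementation exists.
package asmadd

// AddInt32 adds delta to *addr and returns the new value.
// The body is supplied in assembly, as sync/atomic does under -race.
func AddInt32(addr *int32, delta int32) (new int32)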