Commit ec0c9f27 authored by Dmitriy Vyukov

sync: use RunParallel in benchmarks

LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://golang.org/cl/68050043
parent 1c986191
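
The diff below rewrites each benchmark's hand-rolled fan-out (GOMAXPROCS goroutines, an atomic countdown over b.N, and a done channel) in terms of testing.B.RunParallel, which distributes the b.N iterations across GOMAXPROCS goroutines itself. A minimal sketch of the pattern this CL applies, assuming a standalone _test.go file (BenchmarkExample and the package name are hypothetical, not part of the CL):

package example

import (
	"sync"
	"testing"
)

func BenchmarkExample(b *testing.B) {
	var mu sync.Mutex
	b.RunParallel(func(pb *testing.PB) {
		// Each worker goroutine pulls iterations until the shared
		// budget of b.N iterations is exhausted.
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}
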
@@ -9,7 +9,6 @@ package sync_test
 import (
 	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
@@ -90,63 +89,34 @@ func BenchmarkMutexUncontended(b *testing.B) {
 		Mutex
 		pad [128]uint8
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		var mu PaddedMutex
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			mu.Lock()
 			mu.Unlock()
 		}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	})
 }
 func benchmarkMutex(b *testing.B, slack, work bool) {
-	const (
-		CallsPerSched = 1000
-		LocalWork = 100
-		GoroutineSlack = 10
-	)
-	procs := runtime.GOMAXPROCS(-1)
+	var mu Mutex
 	if slack {
-		procs *= GoroutineSlack
+		b.SetParallelism(10)
 	}
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	var mu Mutex
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			mu.Lock()
 			mu.Unlock()
 			if work {
-						for i := 0; i < LocalWork; i++ {
+				for i := 0; i < 100; i++ {
 					foo *= 2
 					foo /= 2
 				}
 			}
 		}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		_ = foo
+	})
 }
 func BenchmarkMutex(b *testing.B) {
......
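
Note on the slack variant above: the old benchmarkMutex oversubscribed the machine by multiplying the goroutine count by GoroutineSlack (10); with RunParallel the same oversubscription is requested via b.SetParallelism, called before RunParallel, e.g.:

	b.SetParallelism(10) // RunParallel then uses 10*GOMAXPROCS goroutines
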
@@ -5,9 +5,7 @@
 package sync_test
 import (
-	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
@@ -62,24 +60,11 @@ func TestOncePanic(t *testing.T) {
 }
 func BenchmarkOnce(b *testing.B) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
 	var once Once
 	f := func() {}
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
			once.Do(f)
		}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	})
 }
@@ -128,34 +128,18 @@ func TestPoolStress(t *testing.T) {
 func BenchmarkPool(b *testing.B) {
 	var p Pool
-	var wg WaitGroup
-	n0 := uintptr(b.N)
-	n := n0
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
-				for b := 0; b < 100; b++ {
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
			p.Put(1)
			p.Get()
		}
-			}
-		}()
-	}
-	wg.Wait()
+	})
 }
 func BenchmarkPoolOverlflow(b *testing.B) {
 	var p Pool
-	var wg WaitGroup
-	n0 := uintptr(b.N)
-	n := n0
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
			for b := 0; b < 100; b++ {
				p.Put(1)
			}
@@ -163,7 +147,5 @@ func BenchmarkPoolOverlflow(b *testing.B) {
 				p.Get()
 			}
 		}
-		}()
-	}
-	wg.Wait()
+	})
 }
@@ -7,7 +7,6 @@ package sync_test
 import (
 	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
@@ -16,72 +15,44 @@ func BenchmarkSemaUncontended(b *testing.B) {
 		sem uint32
 		pad [32]uint32
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		sem := new(PaddedSem)
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			Runtime_Semrelease(&sem.sem)
 			Runtime_Semacquire(&sem.sem)
 		}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	})
 }
 func benchmarkSema(b *testing.B, block, work bool) {
-	const CallsPerSched = 1000
-	const LocalWork = 100
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	c2 := make(chan bool, procs/2)
 	sem := uint32(0)
 	if block {
-		for p := 0; p < procs/2; p++ {
+		done := make(chan bool)
 		go func() {
+			for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ {
				Runtime_Semacquire(&sem)
-				c2 <- true
-			}()
			}
+			done <- true
+		}()
+		defer func() {
+			<-done
+		}()
 	}
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			Runtime_Semrelease(&sem)
 			if work {
-						for i := 0; i < LocalWork; i++ {
+				for i := 0; i < 100; i++ {
 					foo *= 2
 					foo /= 2
 				}
 			}
 			Runtime_Semacquire(&sem)
 		}
-			}
-			c <- foo == 42
+		_ = foo
 		Runtime_Semrelease(&sem)
-		}()
-	}
-	if block {
-		for p := 0; p < procs/2; p++ {
-			<-c2
-		}
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	})
 }
 func BenchmarkSemaSyntNonblock(b *testing.B) {
......
@@ -160,16 +160,9 @@ func BenchmarkRWMutexUncontended(b *testing.B) {
 		RWMutex
 		pad [32]uint32
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		var rwm PaddedRWMutex
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			rwm.RLock()
 			rwm.RLock()
 			rwm.RUnlock()
@@ -177,27 +170,14 @@ func BenchmarkRWMutexUncontended(b *testing.B) {
 			rwm.Lock()
 			rwm.Unlock()
 		}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	})
 }
 func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var rwm RWMutex
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			foo++
 			if foo%writeRatio == 0 {
 				rwm.Lock()
@@ -211,13 +191,8 @@ func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
 				rwm.RUnlock()
 			}
 		}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		_ = foo
+	})
 }
 func BenchmarkRWMutexWrite100(b *testing.B) {
......
@@ -5,9 +5,7 @@
 package sync_test
 import (
-	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
@@ -66,41 +64,21 @@ func BenchmarkWaitGroupUncontended(b *testing.B) {
 		WaitGroup
 		pad [128]uint8
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		var wg PaddedWaitGroup
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			wg.Add(1)
 			wg.Done()
 			wg.Wait()
 		}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	})
 }
 func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var wg WaitGroup
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			wg.Add(1)
 			for i := 0; i < localWork; i++ {
 				foo *= 2
@@ -108,13 +86,8 @@ func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
 			}
 			wg.Done()
 		}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		_ = foo
+	})
 }
 func BenchmarkWaitGroupAddDone(b *testing.B) {
@@ -126,34 +99,18 @@ func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
 }
 func benchmarkWaitGroupWait(b *testing.B, localWork int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var wg WaitGroup
-	wg.Add(procs)
-	for p := 0; p < procs; p++ {
-		go wg.Done()
-	}
-	for p := 0; p < procs; p++ {
-		go func() {
+	b.RunParallel(func(pb *testing.PB) {
 		foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
+		for pb.Next() {
 			wg.Wait()
 			for i := 0; i < localWork; i++ {
 				foo *= 2
 				foo /= 2
 			}
 		}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		_ = foo
+	})
 }
 func BenchmarkWaitGroupWait(b *testing.B) {
......
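
To compare these benchmarks before and after a change like this one, they would typically be run with the standard tooling, for example (the flag choice is illustrative, not part of the CL):

	go test -run=NONE -bench=. -cpu=1,4 sync

where -cpu varies GOMAXPROCS between runs.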