Commit 0d377587 authored by Kirill Smelkov

X zodb/cache: Benchmarks

So that we can measure how every change affects cache performance.

Added Cache.Close() along the way: without it there was no way to release
the gcmain goroutine stack, and with many benchmarks run this was causing
out-of-memory.
parent b47715f9
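
As a minimal sketch of the intended pairing (the loop and the useCache helper are hypothetical, for illustration only; NewCache and Close are from this patch):

	for i := 0; i < b.N; i++ {
		c := NewCache(storage, 512) // NewCache spawns the gcmain goroutine
		useCache(c)                 // hypothetical work against the cache
		c.Close()                   // without Close, each iteration leaks a parked goroutine stack
	}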
@@ -17,6 +17,8 @@ runqsteal
 ? runqgrab
 schedule <- top entry
+868c8b37 (runtime: only sleep before stealing work from a running P)
+b75b4d0e (runtime: skip netpoll check if there are no waiters)
 -> stopm() // stop and restart m after waiting for work
 notesleep(m.park)
...
@@ -128,7 +128,7 @@ func NewCache(loader StorLoader, sizeMax int) *Cache {
 		sizeMax: sizeMax,
 	}
 	c.lru.Init()

-	go c.gcmain() // TODO stop it on .Close()
+	go c.gcmain()
 	return c
 }
@@ -148,6 +148,12 @@ func (c *Cache) SetSizeMax(sizeMax int) {
 	}
 }

+// Close stops cache operation. It is illegal to use the cache in any way after Close is called.
+// XXX temp - will be gone
+func (c *Cache) Close() {
+	close(c.gcCh)
+}
+
 // Load loads data from database via cache.
 //
 // If data is already in cache - cached content is returned.
@@ -493,9 +499,13 @@ func (c *Cache) gcsignal() {
 func (c *Cache) gcmain() {
 	for {
 		select {
-		case <-c.gcCh:
+		case _, ok := <-c.gcCh:
+			// end of operation
+			if !ok {
+				return
+			}
+
 			// someone asks us to run GC
-			// XXX also check for quitting here
 			c.gc()
 		}
 	}
...
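
The pattern above - close the request channel and let the receiver detect shutdown via the two-value receive - in a self-contained sketch (all names hypothetical; the real gcmain uses select, which behaves the same for a single channel):

	package main

	import "sync"

	// worker mimics Cache.gcmain: it serves requests until its channel is closed.
	type worker struct {
		ch chan struct{}
		wg sync.WaitGroup
	}

	func (w *worker) main() {
		defer w.wg.Done()
		for {
			_, ok := <-w.ch
			if !ok {
				return // channel closed - shut down
			}
			// handle one request (in the cache: run GC)
		}
	}

	func main() {
		w := &worker{ch: make(chan struct{})}
		w.wg.Add(1)
		go w.main()

		w.ch <- struct{}{} // submit one request
		close(w.ch)        // the analogue of Cache.Close
		w.wg.Wait()        // worker has returned; its goroutine stack can now be freed
	}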
@@ -25,7 +25,9 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
+	"runtime"
 	"sort"
+	"sync/atomic"
 	"testing"

 	"github.com/kylelemons/godebug/pretty"
@@ -604,3 +606,216 @@ func (c *Checker) assertEq(a, b interface{}) {
 		c.t.Fatal("!eq:\n", pretty.Compare(a, b))
 	}
 }
// ----------------------------------------

// noopStorage is a dummy StorLoader which always returns 1-byte data for any oid/xid.
type noopStorage struct{}

var noopData = []byte{0}

func (s *noopStorage) Load(_ context.Context, xid Xid) (buf *Buf, serial Tid, err error) {
	return mkbuf(noopData), 1, nil
}
// benchLoad serially benchmarks a StorLoader - either the storage directly, or a cache on top of it.
//
// Oids accessed are in [0, worksize).
func benchLoad(b *testing.B, l StorLoader, worksize int) {
	benchLoadN(b, b.N, l, worksize)
}

// benchLoadN is the worker for benchLoad, with n overriding b.N.
func benchLoadN(b *testing.B, n int, l StorLoader, worksize int) {
	ctx := context.Background()
	xid := Xid{At: 1, Oid: 0}
	for i := 0; i < n; i++ {
		buf, _, err := l.Load(ctx, xid)
		if err != nil {
			b.Fatal(err)
		}
		buf.XRelease()

		xid.Oid++
		if xid.Oid >= Oid(worksize) {
			xid.Oid = 0
		}
	}
}
// benchmark the raw storage - what the cache will sit on top of
func BenchmarkNoopStorage(b *testing.B) { benchLoad(b, &noopStorage{}, b.N /* = ∞ */) }

// cache sizes to benchmark (in elements; since we use 1-byte elements, this is also bytes)
var cachesizev = []int{0, 16, 128, 512, 4096}
// benchEachCache runs benchmark f against caches with various sizes on top of noop storage.
func benchEachCache(b *testing.B, f func(b *testing.B, c *Cache)) {
	s := &noopStorage{}

	for _, size := range cachesizev {
		b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
			c := NewCache(s, size)
			f(b, c)
			c.Close()
		})
	}
}
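
// Note: b.Run encodes the size into each sub-benchmark's name (e.g.
// BenchmarkCacheNoHit/size=128), so a single configuration can be selected
// on the command line with: go test -bench 'CacheNoHit/size=128'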
// benchmark the cache while N(access) < N(cache-entries), i.e. while the cache is still filling up
func BenchmarkCacheStartup(b *testing.B) {
	s := &noopStorage{}
	c := NewCache(s, b.N)
	benchLoad(b, c, b.N)

	b.StopTimer()
	c.Close()
}
// Serially benchmark cache overhead - the additional time the cache adds when loading not-yet-cached entries.
// The cache is already started - N(access) > N(cache-entries).
func BenchmarkCacheNoHit(b *testing.B) {
	benchEachCache(b, func(b *testing.B, c *Cache) {
		benchLoad(b, c, b.N /* = ∞ */)
	})
}
// Serially benchmark the time a load request takes when it hits the cache.
// The cache is already started - N(access) > N(cache-entries).
func BenchmarkCacheHit(b *testing.B) {
	benchEachCache(b, func(b *testing.B, c *Cache) {
		// warmup - load for cache size
		benchLoadN(b, c.sizeMax, c, c.sizeMax)

		b.ResetTimer()
		benchLoad(b, c, c.sizeMax)
	})
}
// ---- parallel benchmarks (many requests to 1 cache) ----

// benchLoadPar is like benchLoad, but issues loads in parallel via b.RunParallel:
// b.N iterations are distributed over GOMAXPROCS goroutines, each starting at a
// different oid and striding by GOMAXPROCS, so workers do not all hit the same entry.
func benchLoadPar(b *testing.B, l StorLoader, worksize int) {
	ctx := context.Background()
	np := runtime.GOMAXPROCS(0)
	p := uint64(0)

	b.RunParallel(func(pb *testing.PB) {
		oid0 := Oid(atomic.AddUint64(&p, +1)) // all workers start/iterate at different oids
		xid := Xid{At: 1, Oid: oid0}
		for pb.Next() {
			buf, _, err := l.Load(ctx, xid)
			if err != nil {
				b.Fatal(err)
			}
			buf.XRelease()

			xid.Oid += Oid(np)
			if xid.Oid >= Oid(worksize) {
				xid.Oid = oid0
			}
		}
	})
}
func BenchmarkNoopStoragePar(b *testing.B) { benchLoadPar(b, &noopStorage{}, b.N /* = ∞ */) }

func BenchmarkCacheStartupPar(b *testing.B) {
	s := &noopStorage{}
	c := NewCache(s, b.N)
	benchLoadPar(b, c, b.N)

	b.StopTimer()
	c.Close()
}

func BenchmarkCacheNoHitPar(b *testing.B) {
	benchEachCache(b, func(b *testing.B, c *Cache) {
		benchLoadPar(b, c, b.N /* = ∞ */)
	})
}

func BenchmarkCacheHitPar(b *testing.B) {
	benchEachCache(b, func(b *testing.B, c *Cache) {
		// warmup (serially) - load for cache size
		benchLoadN(b, c.sizeMax, c, c.sizeMax)

		b.ResetTimer()
		benchLoadPar(b, c, c.sizeMax)
	})
}
// ---- parallel benchmarks (many caches - each handled serially, as if each were inside a separate process) ----
//
// XXX the gc goroutine is still shared - there is only 1 for all caches.
// XXX this benchmark part will probably go away

// benchLoadProc is like benchLoad, but works with a testing.PB instead of a testing.B.
func benchLoadProc(pb *testing.PB, l StorLoader, worksize int) error {
	ctx := context.Background()
	xid := Xid{At: 1, Oid: 0}
	for pb.Next() {
		buf, _, err := l.Load(ctx, xid)
		if err != nil {
			return err
		}
		buf.XRelease()

		xid.Oid++
		if xid.Oid >= Oid(worksize) {
			xid.Oid = 0
		}
	}

	return nil
}
func BenchmarkNoopStorageProc(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		s := &noopStorage{}
		err := benchLoadProc(pb, s, b.N)
		if err != nil {
			b.Fatal(err)
		}
	})
}

func BenchmarkCacheStartupProc(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		s := &noopStorage{}
		c := NewCache(s, b.N)
		err := benchLoadProc(pb, c, b.N)
		if err != nil {
			b.Fatal(err)
		}
		// XXX stop timer
		c.Close()
	})
}
// benchEachCacheProc is like benchEachCache, but runs f in parallel, one cache per worker.
func benchEachCacheProc(b *testing.B, f func(b *testing.B, pb *testing.PB, c *Cache) error) {
	for _, size := range cachesizev {
		b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) {
			b.RunParallel(func(pb *testing.PB) {
				s := &noopStorage{}
				c := NewCache(s, size)
				err := f(b, pb, c)
				c.Close()
				if err != nil {
					b.Fatal(err)
				}
			})
		})
	}
}

func BenchmarkCacheNoHitProc(b *testing.B) {
	benchEachCacheProc(b, func(b *testing.B, pb *testing.PB, c *Cache) error {
		return benchLoadProc(pb, c, b.N)
	})
}

func BenchmarkCacheHitProc(b *testing.B) {
	benchEachCacheProc(b, func(b *testing.B, pb *testing.PB, c *Cache) error {
		// XXX no warmup
		return benchLoadProc(pb, c, c.sizeMax)
	})
}
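
For the commit's stated goal - measuring how a change affects cache performance - one possible workflow is to run go test -bench . -count 10 before and after a change, save both outputs, and compare them with benchstat (golang.org/x/perf/cmd/benchstat), e.g. benchstat old.txt new.txt.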