Commit 2c2e0b53 authored by Kirill Smelkov's avatar Kirill Smelkov

X benchmark for client (C) object load from storage (S)

BenchmarkGetObject-4       50000             22679 ns/op           11439 B/op         50 allocs/op
parent 0f310080
......@@ -246,6 +246,8 @@ func (l *listener) run() {
}
// XXX add backpressure on too much incoming connections without client .Accept ?
// XXX do not let err go to .accept() - handle here? (but here
// we do not know with which severity and context to log)
link, err := l.l.Accept()
go l.accept(link, err)
}
......@@ -276,7 +278,7 @@ func (l *listener) accept(link *NodeLink, err error) {
}
}
if !ok {
if !ok && link != nil {
link.Close()
}
}
......
......@@ -34,6 +34,8 @@ import (
"testing"
"unsafe"
"golang.org/x/sync/errgroup"
"github.com/kylelemons/godebug/pretty"
"lab.nexedi.com/kirr/neo/go/neo"
......@@ -549,3 +551,80 @@ func TestMasterStorage(t *testing.T) {
Scancel() // ---- // ----
xwait(gwg)
}
// BenchmarkGetObject measures one client object load served by a storage
// node through an in-process test cluster (master M, storage S, client C
// connected over pipenet).  Each benchmark iteration is a single C.Load
// round-trip for the same object.
func BenchmarkGetObject(b *testing.B) {
// create test cluster <- XXX factor to utility func
// in-memory network with three hosts: one per node role.
net := pipenet.New("testnet")
Mhost := net.Host("m")
Shost := net.Host("s")
Chost := net.Host("c")
// back the storage node with ZODB FileStorage test data.
zstor := xfs1stor("../../zodb/storage/fs1/testdata/1.fs")
M := NewMaster("abc1", ":1", Mhost)
S := NewStorage("abc1", "m:1", ":1", Shost, zstor)
C := client.NewClient("abc1", "m:1", Chost)
// spawn M & S
// NOTE defers run LIFO: cancel() fires before wg.Wait(), so the M/S
// run goroutines are asked to stop before we block waiting for them.
ctx, cancel := context.WithCancel(context.Background())
wg, ctx := errgroup.WithContext(ctx)
defer wg.Wait()
defer cancel()
// XXX to wait for "ready to start" -> XXX add something to M api?
// attach a probe so we can synchronize on the master's "start ready"
// trace event below; probes must be (de)attached under tracing lock.
tracer := &MyTracer{xtesting.NewSyncTracer()}
tc := xtesting.NewTraceChecker(b, tracer.SyncTracer)
pg := &tracing.ProbeGroup{}
tracing.Lock()
traceMasterStartReady_Attach(pg, tracer.traceMasterStartReady)
tracing.Unlock()
wg.Go(func() error {
return M.Run(ctx)
})
wg.Go(func() error {
return S.Run(ctx)
})
// command M to start
// block until M reports it is ready to start, then detach the probe -
// tracing must not stay attached during the timed loop.
tc.Expect(masterStartReady(M, true)) // <- XXX better with M api
pg.Done()
err := M.Start()
if err != nil {
b.Fatal(err)
}
// load the object directly from the underlying ZODB storage to get the
// expected data/serial the client responses are checked against.
// Tid=TidMax + TidBefore=true presumably selects the latest revision -
// TODO confirm against zodb.Xid semantics.
xid1 := zodb.Xid{Oid: 1}
xid1.Tid = zodb.TidMax
xid1.TidBefore = true
data1, serial1, err := zstor.Load(ctx, xid1)
if err != nil {
b.Fatal(err)
}
// C.Load(xid1)
// one client load + verification against the reference data above;
// this is the operation being benchmarked.
xcload1 := func() {
cdata1, cserial1, err := C.Load(ctx, xid1)
if err != nil {
b.Fatal(err)
}
if !(bytes.Equal(cdata1, data1) && cserial1 == serial1) {
b.Fatalf("C.Load first -> %q %v ; want %q %v", cdata1, cserial1, data1, serial1)
}
}
// do first C.Load - this also implicitly waits for M & S to come up
// and C to connect to M and S.
xcload1()
// now start the benchmark
// ResetTimer excludes all of the cluster setup above from the
// reported ns/op / B/op figures.
b.ResetTimer()
for i := 0; i < b.N; i++ {
xcload1()
}
}
......@@ -877,7 +877,7 @@ func storCtlService(ctx context.Context, stor *neo.Node) (err error) {
select {
// XXX stub
case <-time.After(1*time.Second):
println(".")
//println(".")
case <-ctx.Done():
// XXX also send StopOperation?
......
......@@ -79,12 +79,12 @@ func (st *SyncTracer) Get1() *SyncTraceMsg {
// XXX naming -> SyncTraceChecker
// TraceChecker synchronously collects and checks tracing events from a SyncTracer
type TraceChecker struct {
t *testing.T
t testing.TB
st *SyncTracer
}
// XXX doc
func NewTraceChecker(t *testing.T, st *SyncTracer) *TraceChecker {
func NewTraceChecker(t testing.TB, st *SyncTracer) *TraceChecker {
return &TraceChecker{t: t, st: st}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment