Commit b36d64fd authored by Han-Wen Nienhuys's avatar Han-Wen Nienhuys

Measure server side latencies too; simplify latencymap.

On T60, for each file we have
- Client side latency is 360us
- 106us of this is server side latency (4.5x lookup 23us, 1x getattr 4us) 
- 16.5us is due to latency measurement (time.Now() calls)
- 3us is due to garbage collection.
parent c99a7986
...@@ -79,10 +79,17 @@ interface, all kernel caching turned off, median stat time: ...@@ -79,10 +79,17 @@ interface, all kernel caching turned off, median stat time:
platform libfuse Go-FUSE difference (%) platform libfuse Go-FUSE difference (%)
Lenovo T60/Fedora17 (1cpu) 349us 355us 2% slower Lenovo T60/Fedora16 (1cpu) 349us 355us 2% slower
Lenovo T400/Lucid (1cpu) 138us 140us 5% slower Lenovo T400/Lucid (1cpu) 138us 140us 5% slower
Dell T3500/Lucid (1cpu) 72us 76us 5% slower Dell T3500/Lucid (1cpu) 72us 76us 5% slower
On T60, for each file we have
- Client side latency is 360us
- 106us of this is server side latency (4.5x lookup 23us, 1x getattr 4us)
- 16.5us is due to latency measurements.
- 3us is due to garbage collection.
CREDITS CREDITS
......
...@@ -28,10 +28,17 @@ func setupFs(fs fuse.FileSystem) (string, func()) { ...@@ -28,10 +28,17 @@ func setupFs(fs fuse.FileSystem) (string, func()) {
if err != nil { if err != nil {
panic(fmt.Sprintf("cannot mount %v", err)) // ugh - benchmark has no error methods. panic(fmt.Sprintf("cannot mount %v", err)) // ugh - benchmark has no error methods.
} }
state.SetRecordStatistics(true)
// state.Debug = true // state.Debug = true
go state.Loop() go state.Loop()
return mountPoint, func() { return mountPoint, func() {
lc, lns := state.Latencies().Get("LOOKUP")
gc, gns := state.Latencies().Get("GETATTR")
fmt.Printf("GETATTR %dus/call n=%d, LOOKUP %dus/call n=%d\n",
gns / int64(1000 * lc), gc,
lns / int64(1000 * lc), lc)
err := state.Unmount() err := state.Unmount()
if err != nil { if err != nil {
log.Println("error during unmount", err) log.Println("error during unmount", err)
...@@ -110,7 +117,6 @@ func TestingBOnePass(b *testing.B, threads int, files []string) (results []float ...@@ -110,7 +117,6 @@ func TestingBOnePass(b *testing.B, threads int, files []string) (results []float
runtime.ReadMemStats(&before) runtime.ReadMemStats(&before)
todo := b.N todo := b.N
for todo > 0 { for todo > 0 {
if len(files) > todo { if len(files) > todo {
files = files[:todo] files = files[:todo]
...@@ -128,6 +134,13 @@ func TestingBOnePass(b *testing.B, threads int, files []string) (results []float ...@@ -128,6 +134,13 @@ func TestingBOnePass(b *testing.B, threads int, files []string) (results []float
return results return results
} }
// BenchmarkTimeNow measures the cost of a bare time.Now() call, so the
// overhead that latency bookkeeping adds to the other benchmarks can be
// estimated and subtracted.
func BenchmarkTimeNow(b *testing.B) {
	for n := b.N; n > 0; n-- {
		time.Now()
	}
}
func BenchmarkCFuseThreadedStat(b *testing.B) { func BenchmarkCFuseThreadedStat(b *testing.B) {
b.StopTimer() b.StopTimer()
......
package fuse package fuse
import ( import (
"fmt"
"sort"
"sync" "sync"
) )
...@@ -11,40 +9,31 @@ type latencyMapEntry struct { ...@@ -11,40 +9,31 @@ type latencyMapEntry struct {
ns int64 ns int64
} }
// LatencyArg is one latency sample to feed into a LatencyMap.
type LatencyArg struct {
	// Name is the primary key the sample is accounted under
	// (e.g. an operation name).
	Name string
	// Arg is an optional secondary key; the empty string means no
	// secondary accounting is done for this sample.
	Arg string
	// DtNs is the elapsed time of the operation, in nanoseconds.
	DtNs int64
}
type LatencyMap struct { type LatencyMap struct {
sync.Mutex sync.Mutex
stats map[string]*latencyMapEntry stats map[string]*latencyMapEntry
secondaryStats map[string]map[string]int64
} }
func NewLatencyMap() *LatencyMap { func NewLatencyMap() *LatencyMap {
m := &LatencyMap{} m := &LatencyMap{}
m.stats = make(map[string]*latencyMapEntry) m.stats = make(map[string]*latencyMapEntry)
m.secondaryStats = make(map[string]map[string]int64)
return m return m
} }
func (m *LatencyMap) AddMany(args []LatencyArg) { func (m *LatencyMap) Get(name string) (count int, dtNs int64) {
m.Mutex.Lock() m.Mutex.Lock()
for _, v := range args { l := m.stats[name]
m.add(v.Name, v.Arg, v.DtNs)
}
m.Mutex.Unlock() m.Mutex.Unlock()
return l.count, l.ns
} }
func (m *LatencyMap) Add(name string, arg string, dtNs int64) { func (m *LatencyMap) Add(name string, dtNs int64) {
m.Mutex.Lock() m.Mutex.Lock()
m.add(name, arg, dtNs) m.add(name, dtNs)
m.Mutex.Unlock() m.Mutex.Unlock()
} }
func (m *LatencyMap) add(name string, arg string, dtNs int64) { func (m *LatencyMap) add(name string, dtNs int64) {
e := m.stats[name] e := m.stats[name]
if e == nil { if e == nil {
e = new(latencyMapEntry) e = new(latencyMapEntry)
...@@ -53,13 +42,6 @@ func (m *LatencyMap) add(name string, arg string, dtNs int64) { ...@@ -53,13 +42,6 @@ func (m *LatencyMap) add(name string, arg string, dtNs int64) {
e.count++ e.count++
e.ns += dtNs e.ns += dtNs
if arg != "" {
_, ok := m.secondaryStats[name]
if !ok {
m.secondaryStats[name] = make(map[string]int64)
}
// TODO - do something with secondaryStats[name]
}
} }
func (m *LatencyMap) Counts() map[string]int { func (m *LatencyMap) Counts() map[string]int {
...@@ -87,15 +69,3 @@ func (m *LatencyMap) Latencies(unit float64) map[string]float64 { ...@@ -87,15 +69,3 @@ func (m *LatencyMap) Latencies(unit float64) map[string]float64 {
return r return r
} }
// TopArgs returns one formatted line per secondary argument recorded
// under name, each carrying the argument's count followed by the
// argument itself, sorted lexicographically. The map is read under the
// lock; formatting of the snapshot and sorting happen after unlocking.
func (m *LatencyMap) TopArgs(name string) []string {
	m.Mutex.Lock()
	secondary := m.secondaryStats[name]
	lines := make([]string, 0, len(secondary))
	for arg, count := range secondary {
		lines = append(lines, fmt.Sprintf("% 9d %s", count, arg))
	}
	m.Mutex.Unlock()
	sort.Strings(lines)
	return lines
}
...@@ -9,8 +9,8 @@ var _ = fmt.Println ...@@ -9,8 +9,8 @@ var _ = fmt.Println
func TestLatencyMap(t *testing.T) { func TestLatencyMap(t *testing.T) {
m := NewLatencyMap() m := NewLatencyMap()
m.Add("foo", "", 0.1e9) m.Add("foo", 0.1e9)
m.Add("foo", "", 0.2e9) m.Add("foo", 0.2e9)
l := m.Latencies(1e-3) l := m.Latencies(1e-3)
if l["foo"] != 150 { if l["foo"] != 150 {
......
...@@ -152,11 +152,8 @@ func NewMountState(fs RawFileSystem) *MountState { ...@@ -152,11 +152,8 @@ func NewMountState(fs RawFileSystem) *MountState {
return ms return ms
} }
func (ms *MountState) Latencies() map[string]float64 { func (ms *MountState) Latencies() *LatencyMap {
if ms.latencies == nil { return ms.latencies
return nil
}
return ms.latencies.Latencies(1e-3)
} }
func (ms *MountState) OperationCounts() map[string]int { func (ms *MountState) OperationCounts() map[string]int {
...@@ -220,6 +217,9 @@ func (ms *MountState) readRequest(exitIdle bool) (req *request, code Status) { ...@@ -220,6 +217,9 @@ func (ms *MountState) readRequest(exitIdle bool) (req *request, code Status) {
return nil, code return nil, code
} }
if ms.latencies != nil {
req.startNs = time.Now().UnixNano()
}
gobbled := req.setInput(dest[:n]) gobbled := req.setInput(dest[:n])
ms.reqMu.Lock() ms.reqMu.Lock()
...@@ -259,14 +259,9 @@ func (ms *MountState) returnRequest(req *request) { ...@@ -259,14 +259,9 @@ func (ms *MountState) returnRequest(req *request) {
func (ms *MountState) recordStats(req *request) { func (ms *MountState) recordStats(req *request) {
if ms.latencies != nil { if ms.latencies != nil {
endNs := time.Now().UnixNano() dt := time.Now().UnixNano() - req.startNs
dt := endNs - req.startNs
opname := operationName(req.inHeader.Opcode) opname := operationName(req.inHeader.Opcode)
ms.latencies.AddMany( ms.latencies.Add(opname, dt)
[]LatencyArg{
{opname, "", dt},
{opname + "-write", "", endNs - req.preWriteNs}})
} }
} }
...@@ -302,9 +297,6 @@ exit: ...@@ -302,9 +297,6 @@ exit:
break exit break exit
} }
if ms.latencies != nil {
req.startNs = time.Now().UnixNano()
}
ms.handleRequest(req) ms.handleRequest(req)
} }
} }
...@@ -359,10 +351,6 @@ func (ms *MountState) write(req *request) Status { ...@@ -359,10 +351,6 @@ func (ms *MountState) write(req *request) Status {
log.Println(req.OutputDebug()) log.Println(req.OutputDebug())
} }
if ms.latencies != nil {
req.preWriteNs = time.Now().UnixNano()
}
if header == nil { if header == nil {
return OK return OK
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment