Commit c9d89f6b authored by Lorenz Bauer, committed by Brad Fitzpatrick

encoding/binary: cache struct sizes to speed up Read and Write

A majority of the work is spent in dataSize when encoding or decoding the
same struct over and over again. That effort is wasted, since the result
doesn't change for a given reflect.Value.

Cache the result of the function for structs, so that subsequent
calls to dataSize can avoid doing work.
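
To make the hot path concrete, here is an illustrative sketch (not part of
this commit; the record type and field names are made up) of the kind of
repeated decode that previously recomputed the struct size on every call:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// record is a hypothetical fixed-size struct used only for illustration.
type record struct {
	ID    uint64
	Score float32
}

func main() {
	buf := make([]byte, binary.Size(record{})) // 12 zero bytes, enough for one record
	var r record
	for i := 0; i < 1000; i++ {
		// Each Read walks the same struct type; with the cache, the size
		// lookup after the first iteration is a single sync.Map load.
		if err := binary.Read(bytes.NewReader(buf), binary.LittleEndian, &r); err != nil {
			fmt.Println("read:", err)
			return
		}
	}
	fmt.Println("decoded", r)
}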

    name         old time/op    new time/op     delta
    ReadStruct     1.00µs ± 1%     0.37µs ± 1%   -62.99%  (p=0.029 n=4+4)
    WriteStruct    1.00µs ± 3%     0.37µs ± 1%   -62.69%  (p=0.008 n=5+5)

    name         old speed      new speed       delta
    ReadStruct   75.1MB/s ± 1%  202.9MB/s ± 1%  +170.16%  (p=0.029 n=4+4)
    WriteStruct  74.8MB/s ± 3%  200.4MB/s ± 1%  +167.96%  (p=0.008 n=5+5)

Fixes #34471

Change-Id: Ic5d987ca95f1197415ef93643a0af6fc1224fdf0
Reviewed-on: https://go-review.googlesource.com/c/go/+/199539
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Run-TryBot: Brad Fitzpatrick <bradfitz@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
parent 2c8529cb
src/encoding/binary/binary.go
@@ -26,6 +26,7 @@ import (
 	"io"
 	"math"
 	"reflect"
+	"sync"
 )
 
 // A ByteOrder specifies how to convert byte sequences into
@@ -363,18 +364,32 @@ func Size(v interface{}) int {
 	return dataSize(reflect.Indirect(reflect.ValueOf(v)))
 }
 
+var structSize sync.Map // map[reflect.Type]int
+
 // dataSize returns the number of bytes the actual data represented by v occupies in memory.
 // For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice
 // it returns the length of the slice times the element size and does not count the memory
 // occupied by the header. If the type of v is not acceptable, dataSize returns -1.
 func dataSize(v reflect.Value) int {
-	if v.Kind() == reflect.Slice {
+	switch v.Kind() {
+	case reflect.Slice:
 		if s := sizeof(v.Type().Elem()); s >= 0 {
 			return s * v.Len()
 		}
 		return -1
-	}
-	return sizeof(v.Type())
+
+	case reflect.Struct:
+		t := v.Type()
+		if size, ok := structSize.Load(t); ok {
+			return size.(int)
+		}
+		size := sizeof(t)
+		structSize.Store(t, size)
+		return size
+
+	default:
+		return sizeof(v.Type())
+	}
 }
 
 // sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable.
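
An editorial aside on the change above (not part of the diff): sync.Map fits
this cache well because each struct type's size is written at most once and
then read many times, possibly from many goroutines, and if two goroutines
miss the cache at the same time they compute the same value, so the racing
Store calls are harmless. A minimal sketch of the call pattern that now hits
the cache (the header type is hypothetical):

package main

import (
	"encoding/binary"
	"fmt"
)

// header is a made-up fixed-size struct for illustration.
type header struct {
	Magic   uint32
	Version uint16
	Flags   uint16
}

func main() {
	fmt.Println(binary.Size(header{})) // 8: computed via sizeof and stored in the cache
	fmt.Println(binary.Size(header{})) // 8: served from the cached entry for header's type
}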
src/encoding/binary/binary_test.go
@@ -7,9 +7,11 @@ package binary
 import (
 	"bytes"
 	"io"
+	"io/ioutil"
 	"math"
 	"reflect"
 	"strings"
+	"sync"
 	"testing"
 )
 
@@ -296,6 +298,58 @@ func TestBlankFields(t *testing.T) {
 	}
 }
 
+func TestSizeStructCache(t *testing.T) {
+	// Reset the cache, otherwise multiple test runs fail.
+	structSize = sync.Map{}
+
+	count := func() int {
+		var i int
+		structSize.Range(func(_, _ interface{}) bool {
+			i++
+			return true
+		})
+		return i
+	}
+
+	var total int
+	added := func() int {
+		delta := count() - total
+		total += delta
+		return delta
+	}
+
+	type foo struct {
+		A uint32
+	}
+
+	type bar struct {
+		A Struct
+		B foo
+		C Struct
+	}
+
+	testcases := []struct {
+		val  interface{}
+		want int
+	}{
+		{new(foo), 1},
+		{new(bar), 1},
+		{new(bar), 0},
+		{new(struct{ A Struct }), 1},
+		{new(struct{ A Struct }), 0},
+	}
+
+	for _, tc := range testcases {
+		if Size(tc.val) == -1 {
+			t.Fatalf("Can't get the size of %T", tc.val)
+		}
+
+		if n := added(); n != tc.want {
+			t.Errorf("Sizing %T added %d entries to the cache, want %d", tc.val, n, tc.want)
+		}
+	}
+}
+
 // An attempt to read into a struct with an unexported field will
 // panic. This is probably not the best choice, but at this point
 // anything else would be an API change.
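
One detail worth spelling out about the expectations in TestSizeStructCache
(editorial note, not part of the commit): dataSize only caches the top-level
struct type it is handed, while sizeof recurses into nested fields without
touching the cache. Sizing new(bar) is therefore expected to add exactly one
entry, the one for bar itself, even though bar contains foo and Struct
fields, and sizing the same type again is expected to add none.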
@@ -436,6 +490,14 @@ func BenchmarkReadStruct(b *testing.B) {
 	}
 }
 
+func BenchmarkWriteStruct(b *testing.B) {
+	b.SetBytes(int64(Size(&s)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Write(ioutil.Discard, BigEndian, &s)
+	}
+}
+
 func BenchmarkReadInts(b *testing.B) {
 	var ls Struct
 	bsr := &byteSliceReader{}
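
As a usage note (not spelled out in the commit), the ReadStruct/WriteStruct
numbers quoted in the commit message can be reproduced by running the
benchmarks before and after the change, for example with
"go test -run NONE -bench 'ReadStruct|WriteStruct' encoding/binary", and
feeding the two outputs to benchstat (golang.org/x/perf/cmd/benchstat), which
prints the delta table shown above.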