Commit adfec875 authored by Kirill Smelkov's avatar Kirill Smelkov

X size checking; found one more race <α vs <β (if α loads first but takes oce lock second)

parent 0a7c9b7c
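The race being pinned down here: two revision cache entries (rce) for the same object, <α and <β, load concurrently, and loadRCE only does its size/LRU bookkeeping after taking the lock of the per-object cache entry (oce). If α's storage load completes first but α reaches the oce lock second, β can already see α's data under the lock, merge the covered ranges and drop the superseded entry, while α's own contribution to c.size has not been added yet. Below is a minimal, purely illustrative Go sketch of that interleaving (the names alpha/beta, the plain counter and the channels are made up for the illustration and are not the cache's API); it shows why the accounting is only guaranteed to be consistent once every loader has gone through its own locked section.

```go
// Illustration only (hypothetical names, not the cache's real code):
// "alpha" finishes its load first but takes the shared lock second; "beta"
// locks first, drops a superseded entry and leaves accounting of alpha's
// data to alpha itself, so the total is temporarily too small.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	size := 5 // one already-accounted 5-byte entry (plays the role of c.size)

	alphaLoaded := make(chan struct{}) // alpha's data is ready
	betaDone := make(chan struct{})    // beta finished its locked section

	var wg sync.WaitGroup
	wg.Add(2)

	go func() { // alpha: loads first, locks second
		defer wg.Done()
		close(alphaLoaded) // load of 5 new bytes is complete ...
		<-betaDone         // ... but beta reaches the lock before alpha
		mu.Lock()
		size += 5 // alpha finally accounts its own data
		mu.Unlock()
	}()

	go func() { // beta: takes the lock first and merges
		defer wg.Done()
		<-alphaLoaded // beta sees alpha's data already loaded
		mu.Lock()
		size -= 5 // beta drops the superseded entry; alpha's 5 bytes are
		// cached but not yet accounted, so size is temporarily too small
		mu.Unlock()
		close(betaDone)
	}()

	wg.Wait()
	fmt.Println("final size:", size) // consistent again once alpha ran: 5
}
```

The test changes further down force exactly this interleaving with <14 and <16 and check c.size before and after the late loader takes the oce lock.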
@@ -299,7 +299,14 @@ func (c *Cache) loadRCE(rce *revCacheEntry, oid zodb.Oid) {

}
// if rce & rceNext cover the same range -> drop rce
// (if we drop rce - no need to update c.lru as new rce is not on that list)
//
// if we drop rce - do not update c.lru as:
// 1. new rce is not on the lru list,
// 2. rceNext (which becomes rce) might not yet be on the lru list.
//
// if rceNext is not yet on the lru list, its loadRCE is still in progress
// and will update the lru list and cache size itself.
rceDropped := false
if i + 1 < len(oce.rcev) {
rceNext := oce.rcev[i+1]
if rceNext.loaded() && tryMerge(rce, rceNext, rce, oid) {
@@ -307,6 +314,7 @@ func (c *Cache) loadRCE(rce *revCacheEntry, oid zodb.Oid) {
// tryMerge can change rce.data if consistency is broken
δsize = 0
rce = rceNext
rceDropped = true
}
}
@@ -317,6 +325,8 @@ func (c *Cache) loadRCE(rce *revCacheEntry, oid zodb.Oid) {
rcePrev := oce.rcev[i-1]
if rcePrev.loaded() && tryMerge(rcePrev, rce, rce, oid) {
rcePrevDropped = rcePrev
// XXX not always right - e.g. if rcePrev has not yet taken the oce lock
// XXX ^^^ test for this
δsize -= len(rcePrev.data)
}
}
@@ -330,7 +340,9 @@ func (c *Cache) loadRCE(rce *revCacheEntry, oid zodb.Oid) {
if rcePrevDropped != nil {
rcePrevDropped.inLRU.Delete()
}
rce.inLRU.MoveBefore(&c.lru)
if !rceDropped {
rce.inLRU.MoveBefore(&c.lru)
}
//xv2 := map[string]interface{}{"lru": &c.lru, "rce": &rce.inLRU}
//fmt.Printf("\n--------\n%s\n\n\n", pretty.Sprint(xv2))
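Summarising the loadRCE side of the change: when rce turns out to cover the same range as rceNext and is dropped in its favour, the loader must neither add rce's size (δsize is reset to 0) nor move anything onto the LRU list, because the surviving entry either is already on the list or will put itself there when its own loadRCE finishes; that is what the new rceDropped guard around rce.inLRU.MoveBefore(&c.lru) encodes. The sketch below is a simplified paraphrase of that bookkeeping tail, with the surrounding types stubbed out and the intrusive LRU list replaced by the standard container/list; it is not the actual implementation.

```go
// Simplified paraphrase of the locked bookkeeping tail of loadRCE
// (stub types, not the real cache): only an entry this goroutine still
// owns touches the LRU list and the size counter; a dropped rce is left
// to its surviving successor.
package main

import (
	"container/list"
	"fmt"
)

type entry struct {
	data   []byte
	inLRU  *list.Element // nil while the entry is not yet on the LRU list
	loaded bool
}

type cache struct {
	lru  list.List // front = most recently used
	size int
}

// finishLoad mimics what loadRCE does after the data has arrived, while
// holding the per-object lock.
func (c *cache) finishLoad(rce, rceNext *entry) {
	δsize := len(rce.data)
	rceDropped := false

	// if the next entry covers the same range, drop rce in its favour;
	// the surviving entry accounts its own data when its load finishes.
	if rceNext != nil && rceNext.loaded {
		δsize = 0
		rce = rceNext
		rceDropped = true
	}

	c.size += δsize
	if !rceDropped && rce.inLRU == nil {
		rce.inLRU = c.lru.PushFront(rce) // only our own entry becomes MRU
	}
}

func main() {
	c := &cache{}

	a := &entry{data: []byte("hello"), loaded: true}
	c.finishLoad(a, nil)
	fmt.Println(c.size, c.lru.Len()) // 5 1

	// b is merged away into a: it neither grows c.size nor joins the LRU.
	b := &entry{data: []byte("hello"), loaded: true}
	c.finishLoad(b, a)
	fmt.Println(c.size, c.lru.Len()) // still 5 1
}
```

The test changes that follow exercise this bookkeeping through checkMRU, which now also verifies the cache size.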
@@ -86,17 +86,6 @@ func (stor *tStorage) Load(xid zodb.Xid) (data []byte, serial zodb.Tid, err erro
var ioerr = errors.New("input/output error")
var tstor = &tStorage{
dataMap: map[zodb.Oid][]tOidData{
1: {
{4, []byte("hello"), nil},
{7, nil, ioerr},
{10, []byte("world"), nil},
{18, []byte("www"), nil},
},
},
}
func xidlt(oid zodb.Oid, tid zodb.Tid) zodb.Xid {
return zodb.Xid{Oid: oid, XTid: zodb.XTid{Tid: tid, TidBefore: true}}
}
@@ -125,9 +114,20 @@ func TestCache(t *testing.T) {
//eq := func(a, b interface{}) { t.Helper(); tc.assertEq(a, b) }
hello := []byte("hello")
world := []byte("world")
world := []byte("world!!")
www := []byte("www")
tstor := &tStorage{
dataMap: map[zodb.Oid][]tOidData{
1: {
{4, hello, nil},
{7, nil, ioerr},
{10, world, nil},
{18, www, nil},
},
},
}
c := NewCache(tstor)
checkLoad := func(xid zodb.Xid, data []byte, serial zodb.Tid, err error) {
@@ -178,8 +178,9 @@ func TestCache(t *testing.T) {
}
}
checkMRU := func(mruvOk ...*revCacheEntry) {
checkMRU := func(sizeOk int, mruvOk ...*revCacheEntry) {
t.Helper()
size := 0
var mruv []*revCacheEntry
for hp, h := &c.lru, c.lru.prev; h != &c.lru; hp, h = h, h.prev {
//xv := []interface{}{&c.lru, h.rceFromInLRU()}
@@ -188,17 +189,26 @@ func TestCache(t *testing.T) {
t.Fatalf("LRU list .next/.prev broken for\nh:\n%s\n\nhp:\n%s\n",
debug.Sprint(h), debug.Sprint(hp))
}
mruv = append(mruv, h.rceFromInLRU())
rce := h.rceFromInLRU()
size += len(rce.data)
mruv = append(mruv, rce)
}
if !reflect.DeepEqual(mruv, mruvOk) {
t.Fatalf("MRU:\n%s\n", pretty.Compare(mruv, mruvOk))
}
if size != sizeOk {
t.Fatalf("cache: size(all-rce-in-lru): %d ; want: %d", size, sizeOk)
}
if size != c.size {
t.Fatalf("cache: size(all-rce-in-lru): %d ; c.size: %d", size, c.size)
}
}
// ---- verify cache behaviour for must be loaded/merged entries ----
// (this exercises mostly loadRCE/tryMerge)
checkMRU()
checkMRU(0)
// load <3 -> new rce entry
checkLoad(xidlt(1,3), nil, 0, &zodb.ErrXidMissing{xidlt(1,3)})
@@ -206,7 +216,7 @@ func TestCache(t *testing.T) {
ok1(len(oce1.rcev) == 1)
rce1_b3 := oce1.rcev[0]
checkRCE(rce1_b3, 3, 0, nil, &zodb.ErrXidMissing{xidlt(1,3)})
checkMRU(rce1_b3)
checkMRU(0, rce1_b3)
// load <4 -> <3 merged with <4
checkLoad(xidlt(1,4), nil, 0, &zodb.ErrXidMissing{xidlt(1,4)})
@@ -214,14 +224,14 @@ func TestCache(t *testing.T) {
rce1_b4 := oce1.rcev[0]
ok1(rce1_b4 != rce1_b3) // rce1_b3 was merged into rce1_b4
checkRCE(rce1_b4, 4, 0, nil, &zodb.ErrXidMissing{xidlt(1,4)})
checkMRU(rce1_b4)
checkMRU(0, rce1_b4)
// load <2 -> <2 merged with <4
checkLoad(xidlt(1,2), nil, 0, &zodb.ErrXidMissing{xidlt(1,2)})
ok1(len(oce1.rcev) == 1)
ok1(oce1.rcev[0] == rce1_b4)
checkRCE(rce1_b4, 4, 0, nil, &zodb.ErrXidMissing{xidlt(1,4)})
checkMRU(rce1_b4)
checkMRU(0, rce1_b4)
// load <6 -> new rce entry with data
checkLoad(xidlt(1,6), hello, 4, nil)
@@ -229,12 +239,12 @@ func TestCache(t *testing.T) {
rce1_b6 := oce1.rcev[1]
checkRCE(rce1_b6, 6, 4, hello, nil)
checkOCE(1, rce1_b4, rce1_b6)
checkMRU(rce1_b6, rce1_b4)
checkMRU(5, rce1_b6, rce1_b4)
// load <5 -> <5 merged with <6
checkLoad(xidlt(1,5), hello, 4, nil)
checkOCE(1, rce1_b4, rce1_b6)
checkMRU(rce1_b6, rce1_b4)
checkMRU(5, rce1_b6, rce1_b4)
// load <7 -> <6 merged with <7
checkLoad(xidlt(1,7), hello, 4, nil)
@@ -243,7 +253,7 @@ func TestCache(t *testing.T) {
ok1(rce1_b7 != rce1_b6)
checkRCE(rce1_b7, 7, 4, hello, nil)
checkOCE(1, rce1_b4, rce1_b7)
checkMRU(rce1_b7, rce1_b4)
checkMRU(5, rce1_b7, rce1_b4)
// load <8 -> ioerr + new rce
checkLoad(xidlt(1,8), nil, 0, ioerr)
@@ -251,7 +261,7 @@ func TestCache(t *testing.T) {
rce1_b8 := oce1.rcev[2]
checkRCE(rce1_b8, 8, 0, nil, ioerr)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8)
checkMRU(rce1_b8, rce1_b7, rce1_b4)
checkMRU(5, rce1_b8, rce1_b7, rce1_b4)
// load <10 -> ioerr + new rce (IO errors are not merged)
checkLoad(xidlt(1,10), nil, 0, ioerr)
@@ -259,7 +269,7 @@ func TestCache(t *testing.T) {
rce1_b10 := oce1.rcev[3]
checkRCE(rce1_b10, 10, 0, nil, ioerr)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10)
checkMRU(rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(5, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// load <11 -> new data rce, not merged with ioerr @<10
checkLoad(xidlt(1,11), world, 10, nil)
@@ -267,7 +277,7 @@ func TestCache(t *testing.T) {
rce1_b11 := oce1.rcev[4]
checkRCE(rce1_b11, 11, 10, world, nil)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b11)
checkMRU(rce1_b11, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(12, rce1_b11, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// load <12 -> <11 merged with <12
checkLoad(xidlt(1,12), world, 10, nil)
@@ -276,7 +286,7 @@ func TestCache(t *testing.T) {
ok1(rce1_b12 != rce1_b11)
checkRCE(rce1_b12, 12, 10, world, nil)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b12)
checkMRU(rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(12, rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// simulate case where <14 and <16 were loaded in parallel, both are ready
// but <14 takes the oce lock before <16, and so <12 is not yet merged
@@ -287,30 +297,39 @@ func TestCache(t *testing.T) {
ok1(new16)
rce1_b16.serial = 10
rce1_b16.data = world
// here: first half of loadRCE(<16) before close(<16.ready)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b12, rce1_b16)
ok1(!rce1_b16.loaded())
checkMRU(rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <16 yet
checkMRU(12, rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <16 yet
// (lookup <14 while <16 is not yet loaded so <16 is not picked
// automatically at lookup phase)
rce1_b14, new14 := c.lookupRCE(xidlt(1,14))
ok1(new14)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b12, rce1_b14, rce1_b16)
checkMRU(rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <14 and <16 yet
checkMRU(12, rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <14 and <16 yet
// (now <16 becomes ready but does not yet take the oce lock)
close(rce1_b16.ready)
ok1(rce1_b16.loaded())
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b12, rce1_b14, rce1_b16)
checkMRU(rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <14 and <16 yet
checkMRU(12, rce1_b12, rce1_b10, rce1_b8, rce1_b7, rce1_b4) // no <14 and <16 yet
// (<14 also becomes ready and takes oce lock first, merging <12 and <14 into <16)
// (<14 also becomes ready and takes the oce lock first, merging <12 and <14 into <16.
// <16 has not yet taken the oce lock, so c.size is temporarily reduced and
// <16 is not yet on the LRU list)
c.loadRCE(rce1_b14, 1)
checkRCE(rce1_b14, 14, 10, world, nil)
checkRCE(rce1_b16, 16, 10, world, nil)
checkRCE(rce1_b12, 12, 10, world, nil)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b16)
checkMRU(rce1_b16, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(5 /*was 12*/, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// (<16 takes oce lock and updates c.size and LRU list)
rce1_b16.ready = make(chan struct{}) // so loadRCE could run
c.loadRCE(rce1_b16, 1)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b16)
checkMRU(12, rce1_b16, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// load =17 -> <16 merged with <18
checkLoad(xideq(1,17), nil, 0, &zodb.ErrXidMissing{xideq(1,17)})
@@ -319,7 +338,7 @@ func TestCache(t *testing.T) {
ok1(rce1_b18 != rce1_b16)
checkRCE(rce1_b18, 18, 10, world, nil)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b18)
checkMRU(rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(12, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// load =18 -> new <19
checkLoad(xideq(1,18), www, 18, nil)
@@ -327,7 +346,7 @@ func TestCache(t *testing.T) {
rce1_b19 := oce1.rcev[5]
checkRCE(rce1_b19, 19, 18, www, nil)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b18, rce1_b19)
checkMRU(rce1_b19, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(15, rce1_b19, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// load =19 -> <19 merged with <20
checkLoad(xideq(1,19), nil, 0, &zodb.ErrXidMissing{xideq(1,19)})
@@ -336,7 +355,7 @@ func TestCache(t *testing.T) {
ok1(rce1_b20 != rce1_b19)
checkRCE(rce1_b20, 20, 18, www, nil)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b10, rce1_b18, rce1_b20)
checkMRU(rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(15, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
// ---- verify rce lookup for must be cached entries ----
@@ -382,7 +401,7 @@ func TestCache(t *testing.T) {
c.loadRCE(rce1_b9, 1)
checkRCE(rce1_b9, 9, 0, nil, ioerr)
checkOCE(1, rce1_b4, rce1_b7, rce1_b8, rce1_b9, rce1_b10, rce1_b18, rce1_b20)
checkMRU(rce1_b9, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(15, rce1_b9, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkLookup(xideq(1,8), rce1_b9)
checkLookup(xidlt(1,8), rce1_b8)
@@ -406,13 +425,13 @@ func TestCache(t *testing.T) {
checkLookup(xidlt(1,1), rce1_b4)
// ---- verify how LRU changes for in-cache loads ----
checkMRU(rce1_b9, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkMRU(15, rce1_b9, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b7, rce1_b4)
checkLoad(xidlt(1,7), hello, 4, nil)
checkMRU(rce1_b7, rce1_b9, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b4)
checkMRU(15, rce1_b7, rce1_b9, rce1_b20, rce1_b18, rce1_b10, rce1_b8, rce1_b4)
checkLoad(xidlt(1,18), world, 10, nil)
checkMRU(rce1_b18, rce1_b7, rce1_b9, rce1_b20, rce1_b10, rce1_b8, rce1_b4)
checkMRU(15, rce1_b18, rce1_b7, rce1_b9, rce1_b20, rce1_b10, rce1_b8, rce1_b4)
// XXX verify LRU eviction
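On the test side, checkMRU now takes the expected total size as its first argument and, besides checking the MRU→LRU order, walks the LRU list summing len(rce.data) and requires that sum to equal both the expected value and c.size. Below is a standalone sketch of that invariant check, with stub types and hypothetical names rather than the real test helpers.

```go
// Sketch of the size invariant the extended checkMRU enforces (stub
// types, not the real test code): the sum of len(data) over everything
// on the LRU list must match both the expected value and c.size.
package main

import (
	"container/list"
	"fmt"
)

type entry struct{ data []byte }

type cache struct {
	lru  list.List // front = most recently used
	size int
}

// checkSize walks the LRU list, sums len(data) over all cached entries and
// verifies the total against the expectation and the size counter.
func (c *cache) checkSize(sizeOk int) error {
	total := 0
	for e := c.lru.Front(); e != nil; e = e.Next() {
		total += len(e.Value.(*entry).data)
	}
	if total != sizeOk {
		return fmt.Errorf("size(all-rce-in-lru): %d ; want: %d", total, sizeOk)
	}
	if total != c.size {
		return fmt.Errorf("size(all-rce-in-lru): %d ; c.size: %d", total, c.size)
	}
	return nil
}

func main() {
	c := &cache{}
	c.lru.PushFront(&entry{data: []byte("hello")})
	c.size = 5
	fmt.Println(c.checkSize(5)) // <nil>

	c.size = 12                 // a desynchronised counter is reported
	fmt.Println(c.checkSize(5)) // size(all-rce-in-lru): 5 ; c.size: 12
}
```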