Commit 85d86a32 authored by Kirill Smelkov

.

parent 150cf9a7
@@ -608,7 +608,8 @@ type WatchLink struct {
 	//
 	// both already established, and watches being initialized in-progress are registered here.
 	// (see setupWatch)
-	byfileMu sync.Mutex          // XXX byfile -> wlink-global watchMu ?
+	byfileMu sync.Mutex          // zheadMu.W | zheadMu.R + byfileMu (XXX recheck)
 	byfile   map[zodb.Oid]*Watch // {} foid -> Watch

 	// IO
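
Note: the new byfileMu annotation reads as a lock-ordering rule: byfile may be touched either under zheadMu held for write, or under zheadMu held for read plus byfileMu. A minimal sketch of that discipline, assuming zheadMu is a sync.RWMutex on the filesystem head (all names other than byfileMu/byfile are illustrative, not the real wcfs API):

package main

import "sync"

type Oid uint64
type Watch struct{}

type WatchLink struct {
	byfileMu sync.Mutex     // zheadMu.W | zheadMu.R + byfileMu
	byfile   map[Oid]*Watch // {} foid -> Watch
}

type Head struct{ zheadMu sync.RWMutex }

// register adds a watch under zheadMu.R + byfileMu: several links may
// register concurrently, but not while the ZODB head is being changed.
func register(h *Head, wl *WatchLink, foid Oid, w *Watch) {
	h.zheadMu.RLock()
	defer h.zheadMu.RUnlock()
	wl.byfileMu.Lock()
	defer wl.byfileMu.Unlock()
	wl.byfile[foid] = w
}

// reset clears byfile under zheadMu.W alone: the exclusive lock already
// excludes every zheadMu.R + byfileMu reader and writer.
func reset(h *Head, wl *WatchLink) {
	h.zheadMu.Lock()
	defer h.zheadMu.Unlock()
	wl.byfile = nil
}

func main() {
	h, wl := &Head{}, &WatchLink{byfile: map[Oid]*Watch{}}
	register(h, wl, 1, &Watch{})
	reset(h, wl)
}
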
@@ -623,12 +624,9 @@ type Watch struct {
 	link *WatchLink // link to client
 	file *BigFile   // XXX needed?

-	// XXX locking
-	at zodb.Tid // requested to be watched @at
-
-	// {} blk -> rev
-	pinned map[int64]zodb.Tid // blocks that are already pinned to be ≤ at
+	mu     sync.Mutex          // XXX ok ?
+	at     zodb.Tid            // requested to be watched @at
+	pinned map[int64]zodb.Tid  // {} blk -> rev; blocks that are already pinned to be ≤ at
 }

 // -------- 3) Cache invariant --------
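
Note: the reshaped Watch now keeps at and pinned next to each other under a single per-watch mutex. A hedged sketch of the invariant the comments state, i.e. every pinned revision stays ≤ at (Tid stands in for zodb.Tid; the pin helper is illustrative, not from the diff):

package main

import (
	"fmt"
	"sync"
)

type Tid uint64

// Watch mirrors the struct from the diff, with placeholder types.
type Watch struct {
	mu     sync.Mutex    // guards at and pinned
	at     Tid           // requested to be watched @at
	pinned map[int64]Tid // {} blk -> rev; blocks already pinned to be ≤ at
}

// pin records blk@rev while enforcing the "pinned ≤ at" invariant.
func (w *Watch) pin(blk int64, rev Tid) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	if rev > w.at {
		return fmt.Errorf("pin blk%d @%d > watch @%d", blk, rev, w.at)
	}
	w.pinned[blk] = rev
	return nil
}

func main() {
	w := &Watch{at: 10, pinned: map[int64]Tid{}}
	fmt.Println(w.pin(3, 8))  // <nil>
	fmt.Println(w.pin(3, 12)) // error: would violate pinned ≤ at
}
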
@@ -1206,6 +1204,7 @@ func (f *BigFile) readBlk(ctx context.Context, blk int64, dest []byte) (err error) {
 // XXX do we really need to use/propagate caller context here? ideally update
 // watchers should be synchronous, and in practice we just use 30s timeout.
 // Should a READ interrupt cause watch update failure?
+// XXX -> pinWatchers? pinOnRead?
 func (f *BigFile) updateWatchers(ctx context.Context, blk int64, treepath []btree.LONode, zblk zBlk, blkrevMax zodb.Tid) {
 	// only head/ is being watched for
 	if f.head.rev != 0 {
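
Note: the comment above updateWatchers questions whether the READ caller's ctx is the right thing to propagate, given that in practice a fixed 30s timeout is used. A sketch of the alternative it hints at, detaching the watcher update from the READ's context and bounding it with its own deadline (standard library only; notifyWatcher is a stand-in, not the real wcfs code):

package main

import (
	"context"
	"fmt"
	"time"
)

// notifyWatcher stands in for sending one pin message to a watcher
// and waiting for the client's acknowledgement.
func notifyWatcher(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated client ack
		return nil
	case <-ctx.Done(): // deadline hit, or caller interrupted
		return ctx.Err()
	}
}

func main() {
	// Detached from the READ's (possibly interrupted) context: a READ
	// interrupt then cannot turn into a watch-update failure.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	fmt.Println(notifyWatcher(ctx))
}
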
@@ -1502,11 +1501,14 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.Tid) (err error) {
 	//	↑       ↑
 	//	w.at    head
 	//
-	// XXX locking
+	// - also: there won't be simultaneous READs that would need to be
+	//   unpinned, because we update w.at to requested at early.
+	//
 	// XXX register only if watch was created anew, not updated?
-	w.at = at
-	f.watchTab[w] = struct{}{}
-	wlink.byfile[foid] = w
+	w.at = at                  // XXX locking
+	// NOTE registering f.watchTab[w] and wlink.byfile[foid] = w must come together.
+	f.watchTab[w] = struct{}{} // XXX locking
+	wlink.byfile[foid] = w     // XXX locking

 	// XXX defer -> unregister watch if error?
@@ -1553,9 +1555,6 @@ func (wlink *WatchLink) setupWatch(ctx context.Context, foid zodb.Oid, at zodb.Tid) (err error) {
 		return err
 	}

-	// XXX or register w to f & wlink here?
-	// NOTE registering f.watchTab[w] and wlink.byfile[foid] = w must come together.
-
 	return nil
 }
@@ -1618,9 +1617,8 @@ func (wlink *WatchLink) _serve() (err error) {
 	}

 	// unregister all watches created on this wlink
-	// XXX locking
-	for _, w := range wlink.byfile {
-		delete(w.file.watchTab, w)
+	for _, w := range wlink.byfile {   // XXX locking
+		delete(w.file.watchTab, w) // XXX locking
 	}
 	wlink.byfile = nil
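
Note: the setupWatch hunk keeps f.watchTab[w] and wlink.byfile[foid] = w together, and the _serve hunk above undoes both on link shutdown. A hedged sketch of funneling that pair through single helpers so the two tables cannot drift apart (types trimmed to the minimum; the per-file locking the remaining XXX comments ask for is elided here):

package main

import "sync"

type Oid uint64

type BigFile struct{ watchTab map[*Watch]struct{} }
type Watch struct{ file *BigFile }

type WatchLink struct {
	byfileMu sync.Mutex
	byfile   map[Oid]*Watch
}

// registerWatch installs w into both tables together, per the NOTE in
// setupWatch: an observer must never see one side without the other.
func (wl *WatchLink) registerWatch(foid Oid, w *Watch) {
	wl.byfileMu.Lock()
	defer wl.byfileMu.Unlock()
	w.file.watchTab[w] = struct{}{}
	wl.byfile[foid] = w
}

// unregisterAll mirrors the teardown in _serve: drop every watch from
// its file's watchTab, then forget the whole byfile map.
func (wl *WatchLink) unregisterAll() {
	wl.byfileMu.Lock()
	defer wl.byfileMu.Unlock()
	for _, w := range wl.byfile {
		delete(w.file.watchTab, w)
	}
	wl.byfile = nil
}

func main() {
	f := &BigFile{watchTab: map[*Watch]struct{}{}}
	wl := &WatchLink{byfile: map[Oid]*Watch{}}
	wl.registerWatch(1, &Watch{file: f})
	wl.unregisterAll()
}
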