Commit cbe904d8, authored Feb 03, 2021 by Kirill Smelkov

.

parent 0af56694

Showing 8 changed files with 84 additions and 94 deletions (+84/-94):
go/neo/client.go		+8	-8
go/neo/client_test.go		+1	-1
go/neo/master.go		+30	-30
go/neo/mastered.go		+24	-23
go/neo/storage.go		+8	-8
go/neo/t_cluster_test.go	+3	-3
go/neo/t_tracego_test.go	+6	-18
go/neo/xneo/xneo.go		+4	-3
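The change is mechanical throughout: _MasteredNode's inner node field is exported as Node, xneo.Node's state is exported as State, and call sites are rewritten from c.node.X / node.state.X to c.nodem.Node.X / node.Node.State.X. As a rough orientation before the per-file diffs, here is a minimal self-contained Go sketch of the resulting access path; the types are simplified stand-ins for illustration, not the real xneo/neonet definitions:

package main

import (
	"fmt"
	"sync"
)

// ClusterState is a stand-in for the composite that groups
// nodeTab/partTab/stateCode behind one exported field.
type ClusterState struct {
	Code int // stand-in for proto.ClusterState
}

// Node is a stand-in for xneo.Node after this commit: State is exported.
type Node struct {
	StateMu sync.RWMutex
	State   ClusterState
}

// MasteredNode is a stand-in for _MasteredNode: its inner Node is now exported.
type MasteredNode struct {
	Node *Node
}

func main() {
	m := &MasteredNode{Node: &Node{}}
	// after this commit callers spell out the full path explicitly:
	m.Node.StateMu.RLock()
	fmt.Println("cluster state code:", m.Node.State.Code)
	m.Node.StateMu.RUnlock()
}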
go/neo/client.go

@@ -49,7 +49,7 @@ import (
 // Client is NEO node that talks to NEO cluster and exposes access to it via ZODB interfaces.
 type Client struct {
-	node  *_MasteredNode
+	nodem *_MasteredNode	// XXX naming

 	// Run is run under:
 	runWG *xsync.WorkGroup
@@ -77,7 +77,7 @@ var _ zodb.IStorageDriver = (*Client)(nil)
 // Use Run to actually start running the node.
 func NewClient(clusterName, masterAddr string, net xnet.Networker) *Client {
 	c := &Client{
-		node:  newMasteredNode(proto.CLIENT, clusterName, net, masterAddr),
+		nodem: newMasteredNode(proto.CLIENT, clusterName, net, masterAddr),
 		at0Ready: make(chan struct{}),
 	}
@@ -97,7 +97,7 @@ func (c *Client) Close() (err error) {
 	// close networker if configured to do so
 	if c.ownNet {
-		err2 := c.node.Net.Close()
+		err2 := c.nodem.Node.Net.Close()
 		if err == nil {
 			err = err2
 		}
@@ -115,7 +115,7 @@ func (c *Client) Run(ctx context.Context) (err error) {
 	ctx, cancel := xcontext.Merge/*Cancel*/(ctx, runCtx)
 	defer cancel()

-	return c.node.TalkMaster(ctx, func(ctx context.Context, mlink *_MasterLink) error {
+	return c.nodem.TalkMaster(ctx, func(ctx context.Context, mlink *_MasterLink) error {
 		// XXX errctx ("on redial"? "connected"?)
 		c.head0 = c.head
@@ -271,7 +271,7 @@ func (c *Client) Sync(ctx context.Context) (head zodb.Tid, err error) {
 	}()

-	err = c.node.WithOperational(ctx, func(mlink *neonet.NodeLink, _ *xneo.ClusterState) error {
+	err = c.nodem.WithOperational(ctx, func(mlink *neonet.NodeLink, _ *xneo.ClusterState) error {
 		// XXX mlink can become down while we are making the call.
 		// XXX do we want to return error or retry?
 		reply := proto.AnswerLastTransaction{}
@@ -296,7 +296,7 @@ func (c *Client) Load(ctx context.Context, xid zodb.Xid) (buf *mem.Buf, serial z
 	// Retrieve storages we might need to access.
 	storv := make([]*xneo.PeerNode, 0, 1)
-	err = c.node.WithOperational(ctx, func(mlink *neonet.NodeLink, cs *xneo.ClusterState) error {
+	err = c.nodem.WithOperational(ctx, func(mlink *neonet.NodeLink, cs *xneo.ClusterState) error {
 		for _, cell := range cs.PartTab.Get(xid.Oid) {
 			if cell.Readable() {
 				stor := cs.NodeTab.Get(cell.NID)
@@ -510,10 +510,10 @@ func (c *Client) URL() string {
 	// (but we need to be able to construct URL if Client was created via NewClient directly)
 	zurl := "neo"
-	if strings.Contains(c.node.Net.Network(), "+tls") {
+	if strings.Contains(c.nodem.Node.Net.Network(), "+tls") {
 		zurl += "s"
 	}
-	zurl += fmt.Sprintf("://%s/%s", c.node.MasterAddr, c.node.ClusterName)
+	zurl += fmt.Sprintf("://%s/%s", c.nodem.Node.MasterAddr, c.nodem.Node.ClusterName)
 	return zurl
 }
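Note on the Client.URL hunk above: the URL scheme is chosen by whether the networker's network name contains "+tls". A hedged, self-contained sketch of just that selection logic; the helper name and sample values are made up for illustration:

package main

import (
	"fmt"
	"strings"
)

// neoURL mirrors the scheme selection in Client.URL: "neo://" for plain
// connections, "neos://" when the network name carries a "+tls" marker.
func neoURL(network, masterAddr, clusterName string) string {
	zurl := "neo"
	if strings.Contains(network, "+tls") {
		zurl += "s"
	}
	return zurl + fmt.Sprintf("://%s/%s", masterAddr, clusterName)
}

func main() {
	fmt.Println(neoURL("tcp", "127.0.0.1:5551", "test"))     // neo://127.0.0.1:5551/test
	fmt.Println(neoURL("tcp+tls", "127.0.0.1:5551", "test")) // neos://127.0.0.1:5551/test
}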
go/neo/client_test.go

@@ -567,7 +567,7 @@ func withNEO(t *testing.T, f func(t *testing.T, nsrv NEOSrv, ndrv *Client), optv
 	if noautodetect {
 		encOK = srvEnc
 	}
-	err = ndrv.node.WithOperational(context.Background(), func(mlink *neonet.NodeLink, _ *xneo.ClusterState) error {
+	err = ndrv.nodem.WithOperational(context.Background(), func(mlink *neonet.NodeLink, _ *xneo.ClusterState) error {
 		enc := mlink.Encoding()
 		if enc != encOK {
 			t.Fatalf("connected with encoding %c ; want %c", enc, encOK)
go/neo/master.go

@@ -74,7 +74,7 @@ type Master struct {
 // Use Run to actually start running the node.
 func NewMaster(clusterName string, net xnet.Networker) *Master {
 	m := &Master{
-		node: xneo.NewNode(net, proto.MASTER, clusterName, ""),
+		node: xneo.NewNode(proto.MASTER, clusterName, net, ""),
 		ctlStart: make(chan chan error),
 		ctlStop:  make(chan chan struct{}),
@@ -120,7 +120,7 @@ func (m *Master) Shutdown() error {
 // setClusterState sets .clusterState and notifies subscribers.
 func (m *Master) setClusterState(state proto.ClusterState) {
-	m.node.ClusterState.Set(state)
+	m.node.State.Code.Set(state)

 	// TODO notify subscribers
 }
@@ -148,7 +148,7 @@ func (m *Master) Run(ctx context.Context, l xnet.Listener) (err error) {
 	}

 	// update nodeTab with self
-	m.node.NodeTab.Update(m.node.MyInfo)
+	m.node.State.NodeTab.Update(m.node.MyInfo)

 	// wrap listener with link / identificaton hello checker
 	lli := xneo.NewListener(neonet.NewLinkListener(l))
@@ -299,7 +299,7 @@ func (m *Master) recovery(ctx context.Context) (err error) {
 	// start recovery on all storages we are currently in touch with
 	// XXX close links to clients
-	for _, stor := range m.node.NodeTab.StorageList() {
+	for _, stor := range m.node.State.NodeTab.StorageList() {
 		if stor.State > proto.DOWN {	// XXX state cmp ok ?	XXX or stor.Link != nil ?
 			inprogress++
 			wg.Add(1)
@@ -358,18 +358,18 @@ loop:
 				// we are interested in latest partTab
 				// NOTE during recovery no one must be subscribed to
 				// partTab so it is ok to simply change whole m.partTab
-				if r.partTab.PTid > m.node.PartTab.PTid {
-					m.node.PartTab = r.partTab
+				if r.partTab.PTid > m.node.State.PartTab.PTid {
+					m.node.State.PartTab = r.partTab
 				}
 			}

 			// update indicator whether cluster currently can be operational or not
 			var ready bool
-			if m.node.PartTab.PTid == 0 {
+			if m.node.State.PartTab.PTid == 0 {
 				// new cluster - allow startup if we have some storages passed
 				// recovery and there is no in-progress recovery running
 				nup := 0
-				for _, stor := range m.node.NodeTab.StorageList() {
+				for _, stor := range m.node.State.NodeTab.StorageList() {
 					if stor.State > proto.DOWN {
 						nup++
 					}
@@ -377,7 +377,7 @@ loop:
 				ready = (nup > 0 && inprogress == 0)
 			} else {
-				ready = m.node.PartTab.OperationalWith(m.node.NodeTab)	// XXX + node state
+				ready = m.node.State.PartTab.OperationalWith(m.node.State.NodeTab)	// XXX + node state
 			}

 			if readyToStart != ready {
@@ -451,24 +451,24 @@ loop2:
 	// S PENDING -> RUNNING
 	// XXX recheck logic is ok for when starting existing cluster
-	for _, stor := range m.node.NodeTab.StorageList() {
+	for _, stor := range m.node.State.NodeTab.StorageList() {
 		if stor.State == proto.PENDING {
 			stor.SetState(proto.RUNNING)
 		}
 	}

 	// if we are starting for new cluster - create partition table
-	if m.node.PartTab.PTid == 0 {
+	if m.node.State.PartTab.PTid == 0 {
 		// XXX -> m.nodeTab.StorageList(State > DOWN)
 		storv := []*xneo.PeerNode{}
-		for _, stor := range m.node.NodeTab.StorageList() {
+		for _, stor := range m.node.State.NodeTab.StorageList() {
 			if stor.State > proto.DOWN {
 				storv = append(storv, stor)
 			}
 		}
-		m.node.PartTab = xneo.MakePartTab(1 /* XXX hardcoded */, storv)
-		m.node.PartTab.PTid = 1
-		log.Infof(ctx, "creating new partition table: %s", m.node.PartTab)
+		m.node.State.PartTab = xneo.MakePartTab(1 /* XXX hardcoded */, storv)
+		m.node.State.PartTab.PTid = 1
+		log.Infof(ctx, "creating new partition table: %s", m.node.State.PartTab)
 	}

 	return nil
@@ -548,13 +548,13 @@ func (m *Master) verify(ctx context.Context) (err error) {
 	// XXX (= py), rationale=?

 	// start verification on all storages we are currently in touch with
-	for _, stor := range m.node.NodeTab.StorageList() {
+	for _, stor := range m.node.State.NodeTab.StorageList() {
 		if stor.State > proto.DOWN {	// XXX state cmp ok ?	XXX or stor.Link != nil ?
 			inprogress++
 			wg.Add(1)
 			go func() {
 				defer wg.Done()
-				storCtlVerify(ctx, stor, m.node.PartTab, verify)
+				storCtlVerify(ctx, stor, m.node.State.PartTab, verify)
 			}()
 		}
 	}
@@ -582,7 +582,7 @@ loop:
 				return
 			}
-			storCtlVerify(ctx, node, m.node.PartTab, verify)
+			storCtlVerify(ctx, node, m.node.State.PartTab, verify)
 		}()
 /*
@@ -590,7 +590,7 @@ loop:
 			n.node.SetState(proto.DOWN)

 			// if cluster became non-operational - we cancel verification
-			if !m.node.PartTab.OperationalWith(m.node.NodeTab) {
+			if !m.node.State.PartTab.OperationalWith(m.node.State.NodeTab) {
 				// XXX ok to instantly cancel? or better
 				// graceful shutdown in-flight verifications?
 				vcancel()
@@ -616,7 +616,7 @@ loop:
 			// check partTab is still operational
 			// if not -> cancel to go back to recovery
-			if !m.node.PartTab.OperationalWith(m.node.NodeTab) {
+			if !m.node.State.PartTab.OperationalWith(m.node.State.NodeTab) {
 				vcancel()
 				err = errClusterDegraded
 				break loop
@@ -758,7 +758,7 @@ func (m *Master) service(ctx context.Context) (err error) {
 	wg := &sync.WaitGroup{}

 	// spawn per-storage service driver
-	for _, stor := range m.node.NodeTab.StorageList() {
+	for _, stor := range m.node.State.NodeTab.StorageList() {
 		if stor.State == proto.RUNNING {	// XXX note PENDING - not adding to service; ok?
 			wg.Add(1)
 			go func() {
@@ -814,7 +814,7 @@ loop:
 			n.node.SetState(proto.DOWN)

 			// if cluster became non-operational - cancel service
-			if !m.node.PartTab.OperationalWith(m.node.NodeTab) {
+			if !m.node.State.PartTab.OperationalWith(m.node.State.NodeTab) {
 				err = errClusterDegraded
 				break loop
 			}
@@ -953,19 +953,19 @@ func (m *Master) keepPeerUpdated(ctx context.Context, link *neonet.NodeLink) (er
 	//clusterState := m.node.ClusterState
 	// XXX ^^^ + subscribe

-	nodev := m.node.NodeTab.All()
+	nodev := m.node.State.NodeTab.All()
 	nodeiv := make([]proto.NodeInfo, len(nodev))
 	for i, node := range nodev {
 		// NOTE .NodeInfo is data not pointers - so won't change after we copy it to nodeiv
 		nodeiv[i] = node.NodeInfo
 	}

-	ptid := m.node.PartTab.PTid
+	ptid := m.node.State.PartTab.PTid
 	ptnr := uint32(0) // FIXME hardcoded NumReplicas; NEO/py keeps this as n(replica)-1
-	ptv := m.node.PartTab.Dump()
+	ptv := m.node.State.PartTab.Dump()

 	// XXX RLock is not enough for subscribe - right?
-	nodech, nodeUnsubscribe := m.node.NodeTab.SubscribeBuffered()
+	nodech, nodeUnsubscribe := m.node.State.NodeTab.SubscribeBuffered()

 	m.node.StateMu.RUnlock()
@@ -1053,7 +1053,7 @@ func (m *Master) identify(ctx context.Context, n nodeCome) (node *xneo.PeerNode,
 	// XXX check nid matches NodeType

-	node = m.node.NodeTab.Get(nid)
+	node = m.node.State.NodeTab.Get(nid)
 	if node != nil {
 		// reject - nid is already occupied by someone else
 		// XXX check also for down state - it could be the same node reconnecting
@@ -1064,7 +1064,7 @@ func (m *Master) identify(ctx context.Context, n nodeCome) (node *xneo.PeerNode,
 	// XXX ok to have this logic inside identify? (better provide from outside ?)
 	switch nodeType {
 	case proto.CLIENT:
-		if m.node.ClusterState != proto.ClusterRunning {
+		if m.node.State.Code != proto.ClusterRunning {
 			return &proto.Error{proto.NOT_READY, "cluster not operational"}
 		}
@@ -1112,7 +1112,7 @@ func (m *Master) identify(ctx context.Context, n nodeCome) (node *xneo.PeerNode,
 		IdTime: proto.IdTime(m.monotime()),
 	}

-	node = m.node.NodeTab.Update(nodeInfo)	// NOTE this notifies all nodeTab subscribers
+	node = m.node.State.NodeTab.Update(nodeInfo)	// NOTE this notifies all nodeTab subscribers
 	node.SetLink(n.req.Link())

 	return node, accept
 }
@@ -1123,7 +1123,7 @@ func (m *Master) identify(ctx context.Context, n nodeCome) (node *xneo.PeerNode,
 func (m *Master) allocNID(nodeType proto.NodeType) proto.NodeID {
 	for num := int32(1); num < 1<<24; num++ {
 		nid := proto.NID(nodeType, num)
-		if m.node.NodeTab.Get(nid) == nil {
+		if m.node.State.NodeTab.Get(nid) == nil {
 			return nid
 		}
 	}
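Aside on allocNID in the last hunk above: it scans node numbers 1 .. 2^24-1 of the requested type and returns the first NID not yet present in the (now State-scoped) node table. A minimal stand-alone sketch of the same scan, with a plain map standing in for m.node.State.NodeTab:

package main

import "fmt"

// allocNID returns the first number in [1, 1<<24) not already used;
// ok=false means the address space is exhausted.
func allocNID(used map[int32]bool) (nid int32, ok bool) {
	for num := int32(1); num < 1<<24; num++ {
		if !used[num] {
			return num, true
		}
	}
	return 0, false
}

func main() {
	used := map[int32]bool{1: true, 2: true}
	nid, ok := allocNID(used)
	fmt.Println(nid, ok) // 3 true
}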
go/neo/mastered.go

@@ -60,8 +60,9 @@ import (
 // The connection to master is persisted by redial as needed.
 //
 // XXX update after introduction of _MasterLink
+// XXX use `nodem *_MasteredNode` XXX naming=?
 type _MasteredNode struct {
-	node *xneo.Node
+	Node *xneo.Node

 	// myInfo proto.NodeInfo // type, laddr, nid, state, idtime
 	// ClusterName string
 	// Net xnet.Networker // network AP we are sending/receiving on
@@ -114,7 +115,7 @@ const (
 // XXX doc
 func newMasteredNode(typ proto.NodeType, clusterName string, net xnet.Networker, masterAddr string) *_MasteredNode {
 	node := &_MasteredNode{
-		node: xneo.NewNode(typ, clusterName, net, masterAddr),
+		Node: xneo.NewNode(typ, clusterName, net, masterAddr),

 		/*
 		myInfo: proto.NodeInfo{
 			Type: typ,
@@ -155,9 +156,9 @@ func newMasteredNode(typ proto.NodeType, clusterName string, net xnet.Networker,
 func (node *_MasteredNode) TalkMaster(ctx context.Context, f func(context.Context, *_MasterLink) error) (err error) {
 	// me0 describes local node when it starts connecting to master, e.g. 'client C?'.
 	// we don't use just NID because it is initially 0 and because master can tell us to change it.
-	me0 := strings.ToLower(node.myInfo.Type.String())
+	me0 := strings.ToLower(node.Node.MyInfo.Type.String())
 	me0 += " "
-	mynid0 := node.myInfo.NID
+	mynid0 := node.Node.MyInfo.NID
 	if mynid0 == 0 {
 		me0 += "?"
 	} else {
@@ -165,7 +166,7 @@ func (node *_MasteredNode) TalkMaster(ctx context.Context, f func(context.Contex
 	}

 	ctx0 := ctx
-	defer task.Runningf(&ctx, "%s: talk master(%s)", me0, node.MasterAddr)(&err)
+	defer task.Runningf(&ctx, "%s: talk master(%s)", me0, node.Node.MasterAddr)(&err)

 	for {
 		node.updateOperational(func() {
@@ -193,23 +194,23 @@ func (node *_MasteredNode) TalkMaster(ctx context.Context, f func(context.Contex
 func (node *_MasteredNode) talkMaster1(ctx, ctxPreTalkM context.Context, f func(context.Context, *_MasterLink) error) error {
 	reqID := &proto.RequestIdentification{
-		NodeType:    node.myInfo.Type,
-		NID:         node.myInfo.NID,
-		Address:     node.myInfo.Addr,
-		ClusterName: node.ClusterName,
-		IdTime:      node.myInfo.IdTime,	// XXX ok?
+		NodeType:    node.Node.MyInfo.Type,
+		NID:         node.Node.MyInfo.NID,
+		Address:     node.Node.MyInfo.Addr,
+		ClusterName: node.Node.ClusterName,
+		IdTime:      node.Node.MyInfo.IdTime,	// XXX ok?
 		DevPath:     nil,	// XXX stub
 		NewNID:      nil,	// XXX stub
 	}
-	mlink, accept, err := xneo.Dial(ctx, proto.MASTER, node.Net, node.MasterAddr, reqID)
+	mlink, accept, err := xneo.Dial(ctx, proto.MASTER, node.Node.Net, node.Node.MasterAddr, reqID)
 	if err != nil {
 		return err
 	}

 	return xcontext.WithCloseOnErrCancel(ctx, mlink, func() (err error) {
-		if accept.YourNID != node.myInfo.NID {
+		if accept.YourNID != node.Node.MyInfo.NID {
 			log.Infof(ctx, "master %s told us to be %s", accept.MyNID, accept.YourNID)
-			node.myInfo.NID = accept.YourNID	// XXX locking ?
+			node.Node.MyInfo.NID = accept.YourNID	// XXX locking ?
 		}

 		// XXX verify Mnid = M*; our nid corresponds to our type
@@ -242,7 +243,7 @@ func (node *_MasteredNode) talkMaster1(ctx, ctxPreTalkM context.Context, f func(
 		node.updateOperational(func() {
 			err = node.updateNodeTab(ctx, &mnt)	// the only err is cmdShutdown
-			node.state.PartTab = pt
+			node.Node.State.PartTab = pt
 			if err == nil {
 				// keep mlink=nil on shutdown so that
 				// .operational does not change to y.
@@ -352,7 +353,7 @@ func (node *_MasteredNode) recvδstate(ctx context.Context, msg proto.Msg) (δpt
 			pt := xneo.PartTabFromDump(msg.PTid, msg.RowList)	// FIXME handle msg.NumReplicas
 			// XXX logging under lock ok?
 			log.Infof(ctx, "parttab update: %s", pt)
-			node.state.PartTab = pt
+			node.Node.State.PartTab = pt

 		// <- δ(partTab)
 		case *proto.NotifyPartitionChanges:
@@ -365,8 +366,8 @@ func (node *_MasteredNode) recvδstate(ctx context.Context, msg proto.Msg) (δpt
 		case *proto.NotifyClusterState:
 			log.Infof(ctx, "state update: %s", msg.State)
-			node.state.Code = msg.State
-			traceClusterStateChanged(&node.state.Code)
+			node.Node.State.Code = msg.State
+			traceClusterStateChanged(&node.Node.State.Code)
 		}
 	})
@@ -384,7 +385,7 @@ func (node *_MasteredNode) updateOperational(δf func()) {
 	defer node.opMu.Unlock()

 	δf()
-	operational := (node.mlink != nil) && node.state.IsOperational()
+	operational := (node.mlink != nil) && node.Node.State.IsOperational()

 	//fmt.Printf("\nupdateOperatinal: %v\n", operational)
 	//fmt.Printf("	mlink: %s\n", node.mlink)
@@ -435,7 +436,7 @@ func (node *_MasteredNode) WithOperational(ctx context.Context, f func(mlink *ne
 	// node.operational=y and node.opMu is rlocked
 	defer node.opMu.RUnlock()

-	return f(node.mlink, &node.state)
+	return f(node.mlink, &node.Node.State)
 }
@@ -448,17 +449,17 @@ func (node *_MasteredNode) updateNodeTab(ctx context.Context, msg *proto.NotifyN
 	// XXX msg.IdTime ?
 	for _, nodeInfo := range msg.NodeList {
 		log.Infof(ctx, "<- node: %v", nodeInfo)
-		node.state.NodeTab.Update(nodeInfo)
+		node.Node.State.NodeTab.Update(nodeInfo)

 		// we have to provide IdTime when requesting identification to other peers
 		// (e.g. Spy checks this is what master broadcast them and if not replies "unknown by master")
 		// TODO .State = DOWN -> ResetLink
-		if nodeInfo.NID == node.myInfo.NID {
+		if nodeInfo.NID == node.Node.MyInfo.NID {
 			// XXX recheck locking
 			// XXX do .myInfo = nodeInfo ?
-			node.myInfo.IdTime = nodeInfo.IdTime
+			node.Node.MyInfo.IdTime = nodeInfo.IdTime

 			// NEO/py currently employs this hack
 			// FIXME -> better it be separate command and handled cleanly
@@ -471,6 +472,6 @@ func (node *_MasteredNode) updateNodeTab(ctx context.Context, msg *proto.NotifyN
 	}

 	// XXX logging under lock ok?
-	log.Infof(ctx, "full nodetab:\n%s", node.state.NodeTab)
+	log.Infof(ctx, "full nodetab:\n%s", node.Node.State.NodeTab)

 	return nil
 }
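The WithOperational/updateOperational pair above follows a read-locked accessor pattern: writers recompute the operational flag under opMu, and readers run their callback while the read lock is held, so the callback must not retain references past its return. A simplified, self-contained sketch of that pattern; the types are stand-ins, not the real _MasteredNode:

package main

import (
	"fmt"
	"sync"
)

type ClusterState struct{ Code int }

type masteredNode struct {
	opMu  sync.RWMutex
	state ClusterState
}

// withOperational hands the shared state to f only while a reader
// lock is held, mirroring the shape of _MasteredNode.WithOperational.
func (n *masteredNode) withOperational(f func(cs *ClusterState) error) error {
	n.opMu.RLock()
	defer n.opMu.RUnlock()
	return f(&n.state)
}

func main() {
	n := &masteredNode{state: ClusterState{Code: 1}}
	_ = n.withOperational(func(cs *ClusterState) error {
		fmt.Println("code:", cs.Code)
		return nil
	})
}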
go/neo/storage.go

@@ -48,7 +48,7 @@ import (
 //
 // Storage implements only NEO protocol logic with data being persisted via provided storage.Backend.
 type Storage struct {
-	node *_MasteredNode
+	node *_MasteredNode	// XXX -> nodem ?

 	// context for providing operational service
 	// it is renewed every time master tells us StartOpertion, so users
@@ -91,7 +91,7 @@ func (stor *Storage) Run(ctx context.Context, l xnet.Listener) (err error) {
 	if err != nil {
 		return err
 	}
-	stor.node.myInfo.Addr = naddr
+	stor.node.Node.MyInfo.Addr = naddr

 	// wrap listener with link / identificaton hello checker
 	lli := xneo.NewListener(neonet.NewLinkListener(l))
@@ -208,16 +208,16 @@ func (stor *Storage) m1initialize1(ctx context.Context, req neonet.Request) erro
 	case *proto.Recovery:
 		err = req.Reply(&proto.AnswerRecovery{
-			PTid:        stor.node.state.PartTab.PTid,
+			PTid:        stor.node.Node.State.PartTab.PTid,
 			BackupTid:   proto.INVALID_TID,
 			TruncateTid: proto.INVALID_TID})

 	case *proto.AskPartitionTable:
 		// TODO initially read PT from disk
 		err = req.Reply(&proto.AnswerPartitionTable{
-			PTid:        stor.node.state.PartTab.PTid,
+			PTid:        stor.node.Node.State.PartTab.PTid,
 			NumReplicas: 0,	// FIXME hardcoded; NEO/py uses this as n(replica)-1
-			RowList:     stor.node.state.PartTab.Dump()})
+			RowList:     stor.node.Node.State.PartTab.Dump()})

 	case *proto.LockedTransactions:
 		// XXX r/o stub
@@ -309,7 +309,7 @@ func (stor *Storage) identify(idReq *proto.RequestIdentification) (proto.Msg, bo
 	if idReq.NodeType != proto.CLIENT {
 		return &proto.Error{proto.PROTOCOL_ERROR, "only clients are accepted"}, false
 	}
-	if idReq.ClusterName != stor.node.ClusterName {
+	if idReq.ClusterName != stor.node.Node.ClusterName {
 		return &proto.Error{proto.PROTOCOL_ERROR, "cluster name mismatch"}, false
 	}
@@ -323,8 +323,8 @@ func (stor *Storage) identify(idReq *proto.RequestIdentification) (proto.Msg, bo
 	}

 	return &proto.AcceptIdentification{
-		NodeType: stor.node.myInfo.Type,
-		MyNID:    stor.node.myInfo.NID,	// XXX lock wrt update
+		NodeType: stor.node.Node.MyInfo.Type,
+		MyNID:    stor.node.Node.MyInfo.NID,	// XXX lock wrt update
 		YourNID:  idReq.NID,
 	}, true
 }
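On Storage.identify above: the acceptance logic is two guard checks, node type must be CLIENT and the cluster name must match, before answering with AcceptIdentification. A toy sketch of the same control flow, with strings standing in for the real proto message types:

package main

import "fmt"

// identify mirrors the guard order in Storage.identify: reject wrong
// node types first, then cluster-name mismatches, otherwise accept.
func identify(nodeType, clusterName, myCluster string) (msg string, ok bool) {
	if nodeType != "CLIENT" {
		return "PROTOCOL_ERROR: only clients are accepted", false
	}
	if clusterName != myCluster {
		return "PROTOCOL_ERROR: cluster name mismatch", false
	}
	return "AcceptIdentification", true
}

func main() {
	msg, ok := identify("CLIENT", "test", "test")
	fmt.Println(msg, ok) // AcceptIdentification true
}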
go/neo/t_cluster_test.go

@@ -292,7 +292,7 @@ func (t *tCluster) Master(name string) ITestMaster {
 func (t *tCluster) NewStorage(name, masterAddr string, back storage.Backend) ITestStorage {
 	tnode := t.registerNewNode(name)
 	s := tNewStorage(t.name, masterAddr, ":1", tnode.net, back)
-	t.gotracer.RegisterNode(s.node, name)
+	t.gotracer.RegisterNode(s.node.Node, name)
 	t.runWG.Go(func(ctx context.Context) error {
 		return s.Run(ctx)
 	})
@@ -311,7 +311,7 @@ func (t *tCluster) Storage(name string) ITestStorage {
 func (t *tCluster) NewClient(name, masterAddr string) ITestClient {
 	tnode := t.registerNewNode(name)
 	c := NewClient(t.name, masterAddr, tnode.net)
-	t.gotracer.RegisterNode(c.node, name)
+	t.gotracer.RegisterNode(c.nodem.Node, name)
 	t.runWG.Go(func(ctx context.Context) error {
 		return c.Run(ctx)
 	})
@@ -343,7 +343,7 @@ func tNewStorage(clusterName, masterAddr, serveAddr string, net xnet.Networker,
 }

 func (s *tStorage) Run(ctx context.Context) error {
-	l, err := s.node.Net.Listen(ctx, s.serveAddr)
+	l, err := s.node.Node.Net.Listen(ctx, s.serveAddr)
 	if err != nil {
 		return err
 	}
go/neo/t_tracego_test.go

@@ -38,8 +38,7 @@ type TraceCollector struct {
 	pg *tracing.ProbeGroup
 	rx interface { RxEvent(interface{}) }

-	// node2Name map[*xneo.Node]string
-	node2Name          map[interface{}]string // XXX -> *_MasteredNode
+	node2Name          map[*xneo.Node]string
 	nodeTab2Owner      map[*xneo.NodeTable]string
 	clusterState2Owner map[*proto.ClusterState]string
 }
@@ -49,7 +48,7 @@ func NewTraceCollector(rx interface { RxEvent(interface{}) }) *TraceCollector {
 		pg: &tracing.ProbeGroup{},
 		rx: rx,

-		node2Name:          make(map[interface{}]string),
+		node2Name:          make(map[*xneo.Node]string),
 		nodeTab2Owner:      make(map[*xneo.NodeTable]string),
 		clusterState2Owner: make(map[*proto.ClusterState]string),
 	}
@@ -78,26 +77,15 @@ func (t *TraceCollector) Detach() {
 //
 // This way it can translate e.g. *NodeTable -> owner node name when creating
 // corresponding event.
-func (t *TraceCollector) RegisterNode(node /*XXX -> *_MasteredNode*/ interface{}, name string) {
+func (t *TraceCollector) RegisterNode(node *xneo.Node, name string) {
 	tracing.Lock()
 	defer tracing.Unlock()

 	// XXX verify there is no duplicate names
 	// XXX verify the same pointer is not registerd twice

-	switch node := node.(type) {
-	default:
-		panic( /*bad type*/ node)
-
-	case *xneo.Node:
-		t.node2Name[node] = name
-		t.nodeTab2Owner[node.NodeTab] = name
-		t.clusterState2Owner[&node.ClusterState] = name
-
-	case *_MasteredNode:
-		t.node2Name[node] = name
-		t.nodeTab2Owner[node.state.NodeTab] = name
-		t.clusterState2Owner[&node.state.Code] = name
-	}
+	t.node2Name[node] = name
+	t.nodeTab2Owner[node.State.NodeTab] = name
+	t.clusterState2Owner[&node.State.Code] = name
 }

 func (t *TraceCollector) TraceNetDial(ev *xnet.TraceDial) {
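The RegisterNode change above replaces an interface{} parameter plus runtime type switch with the single concrete type callers now pass (*xneo.Node), so a bad argument fails at compile time instead of panicking. A generic illustration of that narrowing, with hypothetical names rather than the tracer API:

package main

import "fmt"

type Node struct{ Name string }

// before (roughly): func register(m map[interface{}]string, node interface{}, ...)
// with a switch node.(type) { ... default: panic(...) } inside.
// after: the concrete parameter type lets the compiler enforce it.
func register(m map[*Node]string, node *Node, name string) {
	m[node] = name
}

func main() {
	m := make(map[*Node]string)
	n := &Node{Name: "stor"}
	register(m, n, "S1")
	fmt.Println(m[n]) // S1
}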
go/neo/xneo/xneo.go

@@ -54,8 +54,9 @@ type Node struct {
 	Net        xnet.Networker // network AP we are sending/receiving on
 	MasterAddr string         // address of current master	TODO -> masterRegistry

+	// XXX reconsider not using State and have just .NodeTab, .PartTab, .ClusterState
 	StateMu sync.RWMutex	// <- XXX unexport
-	state   ClusterState	// nodeTab/partTab/stateCode
+	State   ClusterState	// nodeTab/partTab/stateCode	XXX unexport?

 	// NodeTab *NodeTable // information about nodes in the cluster
 	// PartTab *PartitionTable // information about data distribution in the cluster
 	// ClusterState proto.ClusterState // master idea about cluster state
@@ -78,13 +79,13 @@ func NewNode(typ proto.NodeType, clusterName string, net xnet.Networker, masterA
 		Net:        net,
 		MasterAddr: masterAddr,

-		state: ClusterState{
+		State: ClusterState{
 			NodeTab: &NodeTable{},
 			PartTab: &PartitionTable{},
 			Code:    -1, // invalid
 		},
 	}
-	node.state.NodeTab.localNode = node
+	node.State.NodeTab.localNode = node
 	return node
 }
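As NewNode above shows, the exported State composite starts with fresh node/partition tables and Code = -1 (invalid) until the master reports a real state. A stand-alone sketch of that initialization, again with simplified stand-in types rather than the real xneo ones:

package main

import "fmt"

type NodeTable struct{ localNode *Node }
type PartitionTable struct{}

type ClusterState struct {
	NodeTab *NodeTable
	PartTab *PartitionTable
	Code    int
}

type Node struct{ State ClusterState }

// NewNode mirrors the constructor shape: empty tables, invalid code,
// and the node table back-linked to its owning node.
func NewNode() *Node {
	node := &Node{State: ClusterState{
		NodeTab: &NodeTable{},
		PartTab: &PartitionTable{},
		Code:    -1, // invalid until the master tells us otherwise
	}}
	node.State.NodeTab.localNode = node
	return node
}

func main() {
	n := NewNode()
	fmt.Println(n.State.Code) // -1
}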