Stefane Fermigier / neo / Commits / eb2becec

Commit eb2becec authored May 23, 2017 by Kirill Smelkov

.

parent 36d2c08e
Showing 3 changed files with 280 additions and 47 deletions (+280 -47)

go/neo/master.go	+185 -40
go/neo/nodetab.go	+93 -5
go/neo/parttab.go	+2 -2
go/neo/master.go
...
@@ -25,17 +25,24 @@ import (
 	"io"
 	"log"
 	"os"
+	"sync"
 )
 
 // Master is a node overseeing and managing how whole NEO cluster works
 type Master struct {
 	clusterName	string
-	clusterState	ClusterState
 
 	// master manages node and partition tables and broadcast their updates
 	// to all nodes in cluster
-	nodeTab		NodeTable
-	partTab		PartitionTable
+	stateMu		sync.RWMutex
+	nodeTab		NodeTable
+	partTab		PartitionTable
+	clusterState	ClusterState
+
+	//nodeEventQ	chan ...	// for node connected / disconnected events
+	//txnCommittedQ	chan ...	// TODO for when txn is committed
 }
 
 func NewMaster(clusterName string) *Master {
...
@@ -53,11 +60,45 @@ func (m *Master) SetClusterState(state ClusterState) {
 	// XXX actions ?
 }
 
+// run implements main master cluster management logic: node tracking, cluster
+// state updates, scheduling data movement between storage nodes etc
+/*
+func (m *Master) run(ctx context.Context) {
+	for {
+		select {
+		case <-ctx.Done():
+			panic("TODO")
+
+		case nodeEvent := <-m.nodeEventQ:
+			// TODO update nodeTab
+			// add info to nodeTab
+			m.nodeTab.Lock()
+			m.nodeTab.Add(&Node{nodeInfo, link})
+			m.nodeTab.Unlock()
+
+			// TODO notify nodeTab changes
+
+			// TODO consider adjusting partTab
+			// TODO consider how this maybe adjust cluster state
+
+		//case txnCommitted := <-m.txnCommittedQ:
+		}
+	}
+}
+*/
+
 // ServeLink serves incoming node-node link connection
 // XXX +error return?
 func (m *Master) ServeLink(ctx context.Context, link *NodeLink) {
 	fmt.Printf("master: %s: serving new node\n", link)
 
+	//var node *Node
+
 	// close link when either cancelling or returning (e.g. due to an error)
 	// ( when cancelling - link.Close will signal to all current IO to
 	//   terminate with an error )
...
@@ -83,28 +124,161 @@ func (m *Master) ServeLink(ctx context.Context, link *NodeLink) {
 		return
 	}
 
-	// add info to nodeTab
-	m.nodeTab.Lock()
-	m.nodeTab.Add(&Node{nodeInfo, link})
-	m.nodeTab.Unlock()
+	// notify main logic node connects/disconnects
+	_ = nodeInfo
+	/*
+	node = &Node{nodeInfo, link}
+	m.nodeq <- node
+	defer func() {
+		node.state = DOWN
+		m.nodeq <- node
+	}()
+	*/
 
+	// subscribe to nodeTab/partTab/clusterState and notify peer with updates
+	m.stateMu.Lock()
+	//clusterCh := make(chan ClusterState)
+	nodeCh, nodeUnsubscribe := m.nodeTab.SubscribeBuffered()
+	_ = nodeCh; _ = nodeUnsubscribe
+	//partCh, partUnsubscribe := m.partTab.SubscribeBuffered()
+	// TODO cluster subscribe
+	//m.clusterNotifyv = append(m.clusterNotifyv, clusterCh)
+
+	// TODO subscribe to nodeTab and broadcast updates:
+	//
 	// NotifyPartitionTable	PM -> S, C
 	// PartitionChanges	PM -> S, C	// subset of NotifyPartitionTable (?)
 	// NotifyNodeIntormation	PM -> *
 
+	// TODO read initial nodeTab/partTab while still under lock
+	// TODO send later this initial content to peer
+
 	// TODO notify about cluster state changes
 	// ClusterInformation	(PM -> * ?)
+	m.stateMu.Unlock()
+
+	/*
+	go func() {
+		var updates []...
+	}()
+
+	go func() {
+		var clusterState ClusterState
+		changed := false
+
+		select {
+		case clusterState = <-clusterCh:
+			changed = true
+	}()
+	*/
 
 	// identification passed, now serve other requests
 
 	// client: notify + serve requests
+	m.ServeClient(ctx, link)
 
 	// storage:
+	//
+	m.DriveStorage(ctx, link)
+}
+
+// ServeClient serves incoming connection on which peer identified itself as client
+// XXX +error return?
+//func (m *Master) ServeClient(ctx context.Context, conn *Conn) {
+func (m *Master) ServeClient(ctx context.Context, link *NodeLink) {
+	// TODO
+}
+
+// ---- internal requests for storage driver ----
+
+// storageRecovery asks storage driver to extract cluster recovery information from storage
+type storageRecovery struct {
+	resp chan PartitionTable	// XXX +err ?
+}
+
+// storageVerify asks storage driver to perform verification (i.e. "data recovery") operation
+type storageVerify struct {
+	// XXX what is result ?
+}
+
+// storageStartOperation asks storage driver to start storage node operating
+type storageStartOperation struct {
+	resp chan error	// XXX
+}
+
+// storageStopOperation asks storage driver to stop storage node oerating
+type storageStopOperation struct {
+	resp chan error
+}
+
+// DriveStorage serves incoming connection on which peer identified itself as storage
+//
+// There are 2 connections:
+// - notifications: unidirectional M -> S notifications (nodes, parttab, cluster state)
+// - control: bidirectional M <-> S
+//
+// In control communication master always drives the exchange - talking first
+// with e.g. a command or request and expects corresponding answer
+//
+// XXX +error return?
+func (m *Master) DriveStorage(ctx context.Context, link *NodeLink) {
+	// ? >UnfinishedTransactions
+	// ? <AnswerUnfinishedTransactions	(none currently)
+
+	// TODO go for notify chan
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+
+		// XXX recheck
+//		// request from master to do something
+//		case mreq := <-xxx:
+//			switch mreq := mreq.(type) {
+//			case storageRecovery:
+//
+//			case storageVerify:
+//				// TODO
+//
+//			case storageStartOperation:
+//				// XXX timeout ?
+//				// XXX -> chat2 ?
+//				err = EncodeAndSend(conn, &StartOperation{Backup: false /* XXX hardcoded */})
+//				if err != nil {
+//					// XXX err
+//				}
+//
+//				pkt, err := RecvAndDecode(conn)
+//				if err != nil {
+//					// XXX err
+//				}
+//
+//				switch pkt := pkt.(type) {
+//				default:
+//					err = fmt.Errorf("unexpected answer: %T", pkt)
+//
+//				case *NotifyReady:
+//				}
+//
+//				// XXX better in m.nodeq ?
+//				mreq.resp <- err	// XXX err ctx
+//
+//			case storageStopOperation:
+//				// TODO
+//			}
+		}
+	}
 
 	// RECOVERY (master.recovery.RecoveryManager + master.handlers.identification.py)
 	// --------
 	// """
...
@@ -166,35 +340,6 @@ func (m *Master) ServeLink(ctx context.Context, link *NodeLink) {
 	// ...
 	//
 	// StopOperation	PM -> S
 }
 
-// ServeClient serves incoming connection on which peer identified itself as client
-// XXX +error return?
-func (m *Master) ServeClient(ctx context.Context, conn *Conn) {
-	// TODO
-}
-
-// ServeStorage serves incoming connection on which peer identified itself as storage
-// XXX +error return?
-func (m *Master) ServeStorage(ctx context.Context, conn *Conn) {
-	// TODO
-	// >Recovery
-	// <AnswerRecovery
-
-	// ? >UnfinishedTransactions
-	// ? <AnswerUnfinishedTransactions	(none currently)
-
-	// <NotifyReady
-	// >StartOperation
-
-	// >StopOperation	(on shutdown)
-}
-
 func (m *Master) ServeAdmin(ctx context.Context, conn *Conn) {
...
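The storageRecovery / storageStartOperation / storageStopOperation types added above each carry a resp channel, which suggests a request/response handshake between the master's main logic and a per-storage driver goroutine. A minimal sketch of that pattern, assuming a hypothetical control channel (storageCtlq) that is not part of this commit:

// sketch only: ask the storage driver to start operation and wait for its answer.
// storageCtlq is an assumed per-storage control channel; the commit leaves this wiring as TODO.
func askStartOperation(ctx context.Context, storageCtlq chan<- interface{}) error {
	req := storageStartOperation{resp: make(chan error, 1)}

	// hand the request to the driver goroutine (DriveStorage would receive it in its select)
	select {
	case storageCtlq <- req:
	case <-ctx.Done():
		return ctx.Err()
	}

	// wait for the driver's answer via the response channel carried inside the request
	select {
	case err := <-req.resp:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}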
go/neo/nodetab.go
...
@@ -19,6 +19,8 @@ package neo
 // node management & node table
 
 import (
+	"bytes"
+	"fmt"
 	"sync"
 )
...
@@ -61,13 +63,17 @@ import (
 // sure not to accept new connections -> XXX not needed - just stop listening
 // first.
 //
+// XXX once a node was added to NodeTable its entry never deleted: if e.g. a
+// connection to node is lost associated entry is marked as having DOWN (XXX or UNKNOWN ?) node
+// state.
+//
 // NodeTable zero value is valid empty node table.
 type NodeTable struct {
 	// users have to care locking explicitly
 	sync.RWMutex
 
-	nodev	[]Node
+	nodev		[]*Node
+	subscribev	[]chan *Node
 	ver	int	// ↑ for versioning	XXX do we need this?
 }
...
@@ -81,6 +87,74 @@ type Node struct {
 }
 
+// Subscribe subscribes to NodeTable updates
+// it returns a channel via which updates will be delivered
+func (nt *NodeTable) Subscribe() (ch chan *Node, unsubscribe func()) {
+	ch = make(chan *Node)	// XXX how to specify ch buf size if needed ?
+	nt.subscribev = append(nt.subscribev, ch)
+
+	unsubscribe = func() {
+		for i, c := range nt.subscribev {
+			if c == ch {
+				nt.subscribev = append(nt.subscribev[:i], nt.subscribev[i+1:]...)
+				close(ch)
+				return
+			}
+		}
+		panic("XXX unsubscribe not subscribed channel")
+	}
+
+	return ch, unsubscribe
+}
+
+// SubscribeBufferred subscribes to NodeTable updates without blocking updater
+// it returns a channel via which updates are delivered
+// the updates are sent to destination in non-blocking way - if destination channel is not ready
+// they will be bufferred.
+// it is the caller reponsibility to make sure such buffering does not grow up
+// to infinity - via e.g. detecting stuck connections and unsubscribing on shutdown
+//
+// must be called with stateMu held
+func (nt *NodeTable) SubscribeBuffered() (ch chan []*Node, unsubscribe func()) {
+	in, unsubscribe := nt.Subscribe()
+	ch = make(chan []*Node)
+
+	go func() {
+		var updatev []*Node
+		shutdown := false
+
+		for {
+			out := ch
+			if len(updatev) == 0 {
+				if shutdown {
+					// nothing to send and source channel closed
+					// -> close destination and stop
+					close(ch)
+					break
+				}
+				out = nil
+			}
+
+			select {
+			case update, ok := <-in:
+				if !ok {
+					shutdown = true
+					break
+				}
+
+				// FIXME better merge as same node could be updated several times
+				updatev = append(updatev, update)
+
+			case out <- updatev:
+				updatev = nil
+			}
+		}
+	}()
+
+	return ch, unsubscribe
+}
+
 // UpdateNode updates information about a node
 func (nt *NodeTable) UpdateNode(nodeInfo NodeInfo) {
 	// TODO
...
@@ -90,7 +164,7 @@ func (nt *NodeTable) UpdateNode(nodeInfo NodeInfo) {
 func (nt *NodeTable) Add(node *Node) {
 	// XXX check node is already there
 	// XXX pass/store node by pointer ?
-	nt.nodev = append(nt.nodev, *node)
+	nt.nodev = append(nt.nodev, node)
 
 	// TODO notify all nodelink subscribers about new info
 }
...
@@ -98,6 +172,19 @@ func (nt *NodeTable) Add(node *Node) {
 // TODO subscribe for changes on Add ? (notification via channel)
 
+// Lookup finds node by nodeID
+func (nt *NodeTable) Lookup(nodeID NodeID) *Node {
+	// FIXME linear scan
+	for _, node := range nt.nodev {
+		if node.Info.NodeID == nodeID {
+			return node
+		}
+	}
+
+	return nil
+}
+
+// XXX LookupByAddress ?
+
 func (nt *NodeTable) String() string {
 	//nt.RLock()	// FIXME -> it must be client
...
@@ -105,9 +192,10 @@ func (nt *NodeTable) String() string {
 	buf := bytes.Buffer{}
 
-	for node := range nl.nodev {
+	for _, node := range nt.nodev {
 		// XXX recheck output
-		fmt.Fprintf(&buf, "%s (%s)\t%s\t%s\n", node.NodeID, node.NodeType, node.NodeState, node.Address)
+		i := node.Info
+		fmt.Fprintf(&buf, "%s (%s)\t%s\t%s\n", i.NodeID, i.NodeType, i.NodeState, i.Address)
 	}
 
 	return buf.String()
...
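For orientation, a minimal usage sketch of the new subscription API. The watcher function and its locking discipline are illustrative assumptions, not part of the commit; NodeTable's embedded RWMutex is used since the comments say callers handle locking explicitly:

// sketch only: watch node table updates without blocking the updater,
// then unsubscribe when done.
func watchNodes(nt *NodeTable, done <-chan struct{}) {
	nt.Lock()
	updates, unsubscribe := nt.SubscribeBuffered()
	nt.Unlock()

	go func() {
		for {
			select {
			case nodev, ok := <-updates:
				if !ok {
					return // channel closed after unsubscribe
				}
				for _, node := range nodev {
					fmt.Printf("node updated: %v\n", node.Info)
				}

			case <-done:
				nt.Lock()
				unsubscribe()
				nt.Unlock()
				return
			}
		}
	}()
}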
go/neo/parttab.go
...
@@ -137,14 +137,14 @@ type PartitionCell struct {
 //
 // XXX or keep not only NodeID in PartitionCell - add *Node ?
 func (pt *PartitionTable) Operational() bool {
-	for ptEntry := range pt.ptTab {
+	for _, ptEntry := range pt.ptTab {
 		if len(ptEntry) == 0 {
 			return false
 		}
 
 		ok := false
 	cellLoop:
-		for cell := range ptEntry {
+		for _, cell := range ptEntry {
 			switch cell.CellState {
 			case UP_TO_DATE, FEEDING:	// XXX cell.isReadble in py
 				ok = true
...
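The parttab.go change is a small range fix: with a single loop variable, range over a slice yields indices (and over a map, keys), so the previous loops never saw the entries themselves. A standalone illustration of the difference (not from the commit):

package main

import "fmt"

func main() {
	cells := []string{"S1", "S2"}

	for c := range cells {
		fmt.Println(c) // prints indices: 0, 1
	}

	for _, c := range cells {
		fmt.Println(c) // prints elements: S1, S2
	}
}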