Commit b8e0b935
authored Aug 02, 2017 by Kirill Smelkov
.
parent 32a92075
Showing 4 changed files with 34 additions and 26 deletions
go/neo/client/client.go    +2  -2
go/neo/neo.go              +5  -2
go/neo/server/master.go    +12 -10
go/neo/server/storage.go   +15 -12
go/neo/client/client.go

@@ -31,7 +31,7 @@ import (
 // Client talks to NEO cluster and exposes access it via ZODB interfaces
 type Client struct {
-	neo.NodeCommon
+	node neo.NodeCommon

 	storLink *neo.NodeLink	// link to storage node
 	storConn *neo.Conn	// XXX main connection to storage
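For context, a minimal standalone sketch (hypothetical types, not code from this repository) of what the change from an embedded neo.NodeCommon to a named node field means at the language level in Go: an embedded struct promotes its fields and methods into the outer type's API, while a named field keeps access explicit.

	// sketch.go -- illustrative only; NodeCommon here is a stand-in, not the real neo.NodeCommon
	package main

	import "fmt"

	type NodeCommon struct {
		ClusterName string
	}

	// Embedded: NodeCommon's exported fields are promoted, so ce.ClusterName
	// works directly, and NodeCommon becomes part of ClientEmbedded's public API.
	type ClientEmbedded struct {
		NodeCommon
	}

	// Named field: access goes through cn.node.ClusterName and NodeCommon
	// stays an internal detail of the outer struct.
	type ClientNamed struct {
		node NodeCommon
	}

	func main() {
		ce := ClientEmbedded{NodeCommon{ClusterName: "abc"}}
		cn := ClientNamed{node: NodeCommon{ClusterName: "abc"}}
		fmt.Println(ce.ClusterName)      // promoted field via embedding
		fmt.Println(cn.node.ClusterName) // explicit path via named field
	}

This is the pattern the commit applies to Client, Master and Storage below: call sites change from x.myInfo / x.clusterName to x.node.MyInfo / x.node.ClusterName.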
@@ -107,7 +107,7 @@ func NewClient(storLink *neo.NodeLink) (*Client, error) {
 	// XXX move -> Run?
 	// first identify ourselves to peer
-	accept, err := neo.IdentifyWith(neo.STORAGE, storLink, cli.myInfo, cli.clusterName)
+	accept, err := neo.IdentifyWith(neo.STORAGE, storLink, cli.node.MyInfo, cli.node.ClusterName)
 	if err != nil {
 		return nil, err
 	}
go/neo/neo.go

@@ -28,6 +28,9 @@ package neo
 //go:generate sh -c "go run ../xcommon/tracing/cmd/gotrace/{gotrace,util}.go ."

 import (
+	"net"
+
+	"lab.nexedi.com/kirr/neo/go/xcommon/xnet"
 	"lab.nexedi.com/kirr/neo/go/zodb"
 )
@@ -44,7 +47,7 @@ const (
 // NodeCommon is common data in all NEO nodes: Master, Storage & Client	XXX text
 // XXX naming -> Node ?
 type NodeCommon struct {
-	MyInfo		neo.NodeInfo	// XXX -> only NodeUUID
+	MyInfo		NodeInfo	// XXX -> only NodeUUID
 	ClusterName	string

 	Net		xnet.Networker	// network AP we are sending/receiving on
@@ -68,7 +71,7 @@ func (n *NodeCommon) Listen() (net.Listener, error) {
 	// NOTE listen("tcp", ":1234") gives l.Addr 0.0.0.0:1234 and
 	//      listen("tcp6", ":1234") gives l.Addr [::]:1234
 	//      -> host is never empty
-	addr, err := neo.Addr(l.Addr())
+	addr, err := Addr(l.Addr())
 	if err != nil {
 		// XXX -> panic here ?
 		l.Close()
go/neo/server/master.go

@@ -86,10 +86,12 @@ func NewMaster(clusterName, serveAddr string, net xnet.Networker) *Master {
 	}

 	m := &Master{
-		myInfo:		neo.NodeInfo{NodeType: neo.MASTER, Address: addr},
-		clusterName:	clusterName,
-		net:		net,
-		masterAddr:	serveAddr,	// XXX ok?
+		node:		neo.NodeCommon{
+				MyInfo:		neo.NodeInfo{NodeType: neo.MASTER, Address: addr},
+				ClusterName:	clusterName,
+				Net:		net,
+				MasterAddr:	serveAddr,	// XXX ok?
+			},

 		ctlStart:	make(chan chan error),
 		ctlStop:	make(chan chan struct{}),
@@ -99,7 +101,7 @@ func NewMaster(clusterName, serveAddr string, net xnet.Networker) *Master {
 		nodeLeave:	make(chan nodeLeave),
 	}

-	m.myInfo.NodeUUID = m.allocUUID(neo.MASTER)
+	m.node.MyInfo.NodeUUID = m.allocUUID(neo.MASTER)
 	// TODO update nodeTab with self
 	m.clusterState = neo.ClusterRecovering	// XXX no elections - we are the only master
@@ -109,12 +111,12 @@ func NewMaster(clusterName, serveAddr string, net xnet.Networker) *Master {
 // Run starts master node and runs it until ctx is cancelled or fatal error
 func (m *Master) Run(ctx context.Context) error {
 	// start listening
-	l, err := m.Listen()
+	l, err := m.node.Listen()
 	if err != nil {
 		return err	// XXX err ctx
 	}

-	m.masterAddr = l.Addr().String()
+	m.node.MasterAddr = l.Addr().String()

 	// serve incoming connections
 	wg := sync.WaitGroup{}
@@ -152,7 +154,7 @@ func (m *Master) Start() error {
 func (m *Master) Stop() {
 	ech := make(chan struct{})
 	m.ctlStop <- ech
-	return <-ech
+	<-ech
 }

 // Shutdown requests all known nodes in the cluster to stop
@@ -636,7 +638,7 @@ func (m *Master) accept(n nodeCome) (node *neo.Node, ok bool) {
 	// - NodeType valid
 	// - IdTimestamp ?

-	if n.idReq.ClusterName != m.clusterName {
+	if n.idReq.ClusterName != m.node.ClusterName {
 		n.idResp <- &neo.Error{neo.PROTOCOL_ERROR, "cluster name mismatch"}	// XXX
 		return nil, false
 	}
@@ -668,7 +670,7 @@ func (m *Master) accept(n nodeCome) (node *neo.Node, ok bool) {
 	n.idResp <- &neo.AcceptIdentification{
 			NodeType:	neo.MASTER,
-			MyNodeUUID:	m.myInfo.NodeUUID,
+			MyNodeUUID:	m.node.MyInfo.NodeUUID,
 			NumPartitions:	1,	// FIXME hardcoded
 			NumReplicas:	1,	// FIXME hardcoded
 			YourNodeUUID:	uuid,
go/neo/server/storage.go

@@ -36,7 +36,7 @@ import (
 // Storage is NEO node that keeps data and provides read/write access to it
 type Storage struct {
-	neo.NodeCommon
+	node neo.NodeCommon

 	// context for providing operational service
 	// it is renewed every time master tells us StartOpertion, so users
@@ -65,10 +65,13 @@ func NewStorage(cluster, masterAddr, serveAddr string, net xnet.Networker, zstor
 	}

 	stor := &Storage{
-		myInfo:		neo.NodeInfo{NodeType: neo.STORAGE, Address: addr},
-		clusterName:	cluster,
-		net:		net,
-		masterAddr:	masterAddr,
+		node:		neo.NodeCommon{
+				MyInfo:		neo.NodeInfo{NodeType: neo.STORAGE, Address: addr},
+				ClusterName:	cluster,
+				Net:		net,
+				MasterAddr:	masterAddr,
+			},
+
 		zstor:		zstor,
 	}
@@ -121,9 +124,9 @@ func (stor *Storage) talkMaster(ctx context.Context) error {
 	// XXX errctx

 	for {
-		fmt.Printf("stor: master(%v): connecting\n", stor.masterAddr)	// XXX info
+		fmt.Printf("stor: master(%v): connecting\n", stor.node.MasterAddr)	// XXX info
 		err := stor.talkMaster1(ctx)
-		fmt.Printf("stor: master(%v): %v\n", stor.masterAddr, err)
+		fmt.Printf("stor: master(%v): %v\n", stor.node.MasterAddr, err)

 		// TODO if err = shutdown -> return
@@ -143,7 +146,7 @@ func (stor *Storage) talkMaster(ctx context.Context) error {
 // it returns error describing why such cycle had to finish
 // XXX distinguish between temporary problems and non-temporary ones?
 func (stor *Storage) talkMaster1(ctx context.Context) (err error) {
-	Mlink, err := neo.Dial(ctx, stor.net, stor.masterAddr)
+	Mlink, err := neo.Dial(ctx, stor.node.Net, stor.node.MasterAddr)
 	if err != nil {
 		return err
 	}
@@ -155,7 +158,7 @@ func (stor *Storage) talkMaster1(ctx context.Context) (err error) {
 	}()

 	// request identification this way registering our node to master
-	accept, err := neo.IdentifyWith(neo.MASTER, Mlink, stor.myInfo, stor.clusterName)
+	accept, err := neo.IdentifyWith(neo.MASTER, Mlink, stor.node.MyInfo, stor.node.ClusterName)
 	if err != nil {
 		return err
 	}
@@ -166,9 +169,9 @@ func (stor *Storage) talkMaster1(ctx context.Context) (err error) {
 		return fmt.Errorf("TODO for 1-storage POC: Npt: %v Nreplica: %v", accept.NumPartitions, accept.NumReplicas)
 	}

-	if accept.YourNodeUUID != stor.myInfo.NodeUUID {
+	if accept.YourNodeUUID != stor.node.MyInfo.NodeUUID {
 		fmt.Printf("stor: %v: master told us to have UUID=%v\n", Mlink, accept.YourNodeUUID)
-		stor.myInfo.NodeUUID = accept.YourNodeUUID
+		stor.node.MyInfo.NodeUUID = accept.YourNodeUUID
 	}

 	// now handle notifications and commands from master
@@ -181,7 +184,7 @@ func (stor *Storage) talkMaster1(ctx context.Context) (err error) {
 		default:
 		}

-		if err.IsShutdown(...) {	// TODO
+		if err != nil /* TODO .IsShutdown(...) */ {	// TODO
 			return err
 		}