Commit 41a120c1 authored by Kirill Smelkov

go/neo/neonet: tests: Introduce T

T represents a neonet testing environment; in the future it will be
used to run tests under different protocol versions, protocol encodings, etc.

For now it is a no-op wrapper around testing.T, plus t.bin, which currently
just wraps raw string data into []byte and will later be amended to return
different bytes depending on t's encoding.
parent e407f725
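The pattern the diff below establishes: each top-level TestXxx becomes a thin wrapper that hands a _TestXxx(t *T) body to Verify, and raw payload literals are routed through t.bin. A condensed sketch of that shape (summarized from the conversion below, not additional code in this commit):

	func TestNodeLink(t *testing.T) {
		Verify(t, _TestNodeLink) // for now runs the body once; later once per environment
	}

	func _TestNodeLink(t *T) {
		b := t.bin // payload literals go through t.bin ...
		pkt := _mkpkt(1, 2, b("ping")) // ... so only bin has to change when T learns encodings
		// ...
	}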
@@ -42,6 +42,22 @@ import (
"github.com/pkg/errors"
)
// T is neonet testing environment.
type T struct {
*testing.T
}
// Verify tests f for all possible environments.
func Verify(t *testing.T, f func(*T)) {
f(&T{t})
}
// bin returns payload for raw binary data as it would be encoded in t.
func (t *T) bin(data string) []byte {
return []byte(data)
}
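bin is currently an identity wrapper; per the commit message it will later be amended so that the returned bytes depend on t's encoding. A hedged sketch of one possible future shape (the enc field and encodePayload helper are hypothetical, not part of this commit):

	func (t *T) bin(data string) []byte {
		// hypothetical: translate raw data into the wire form of t's encoding
		return encodePayload(t.enc, data)
	}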
func xclose(c io.Closer) {
err := c.Close()
exc.Raiseif(err)
@@ -118,7 +134,7 @@ func (c *Conn) mkpkt(msgcode uint16, payload []byte) *pktBuf {
}
// Verify pktBuf is as expected.
func xverifyPkt(pkt *pktBuf, connid uint32, msgcode uint16, payload []byte) {
func (t *T) xverifyPkt(pkt *pktBuf, connid uint32, msgcode uint16, payload []byte) {
errv := xerr.Errorv{}
h := pkt.Header()
// TODO include caller location
@@ -140,10 +156,10 @@ func xverifyPkt(pkt *pktBuf, connid uint32, msgcode uint16, payload []byte) {
}
// Verify pktBuf to match expected message.
func xverifyPktMsg(pkt *pktBuf, connid uint32, msg proto.Msg) {
func (t *T) xverifyPktMsg(pkt *pktBuf, connid uint32, msg proto.Msg) {
data := make([]byte, msg.NEOMsgEncodedLen())
msg.NEOMsgEncode(data)
xverifyPkt(pkt, connid, msg.NEOMsgCode(), data)
t.xverifyPkt(pkt, connid, msg.NEOMsgCode(), data)
}
// delay a bit.
@@ -161,23 +177,27 @@ func tdelay() {
}
// create NodeLinks connected via net.Pipe
func _nodeLinkPipe(flags1, flags2 _LinkRole) (nl1, nl2 *NodeLink) {
func (t *T) _nodeLinkPipe(flags1, flags2 _LinkRole) (nl1, nl2 *NodeLink) {
node1, node2 := net.Pipe()
nl1 = newNodeLink(node1, _LinkClient|flags1)
nl2 = newNodeLink(node2, _LinkServer|flags2)
return nl1, nl2
}
func nodeLinkPipe() (nl1, nl2 *NodeLink) {
return _nodeLinkPipe(0, 0)
func (t *T) nodeLinkPipe() (nl1, nl2 *NodeLink) {
return t._nodeLinkPipe(0, 0)
}
func TestNodeLink(t *testing.T) {
Verify(t, _TestNodeLink)
}
func _TestNodeLink(t *T) {
// TODO catch exception -> add proper location from it -> t.Fatal (see git-backup)
b := t.bin
bg := context.Background()
// Close vs recvPkt
nl1, nl2 := _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
nl1, nl2 := t._nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg := xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
tdelay()
@@ -191,7 +211,7 @@ func TestNodeLink(t *testing.T) {
xclose(nl2)
// Close vs sendPkt
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
tdelay()
@@ -206,7 +226,7 @@ func TestNodeLink(t *testing.T) {
xclose(nl2)
// {Close,CloseAccept} vs Accept
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
tdelay()
@@ -234,7 +254,7 @@ func TestNodeLink(t *testing.T) {
xclose(nl1)
// Close vs recvPkt on another side
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
tdelay()
@@ -248,7 +268,7 @@ func TestNodeLink(t *testing.T) {
xclose(nl1)
// Close vs sendPkt on another side
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
tdelay()
@@ -263,23 +283,23 @@ func TestNodeLink(t *testing.T) {
xclose(nl1)
// raw exchange
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(linkNoRecvSend, linkNoRecvSend)
wg = xsync.NewWorkGroup(bg)
okch := make(chan int, 2)
gox(wg, func(_ context.Context) {
// send ping; wait for pong
pkt := _mkpkt(1, 2, []byte("ping"))
pkt := _mkpkt(1, 2, b("ping"))
xsendPkt(nl1, pkt)
pkt = xrecvPkt(nl1)
xverifyPkt(pkt, 3, 4, []byte("pong"))
t.xverifyPkt(pkt, 3, 4, b("pong"))
okch <- 1
})
gox(wg, func(_ context.Context) {
// wait for ping; send pong
pkt = xrecvPkt(nl2)
xverifyPkt(pkt, 1, 2, []byte("ping"))
pkt = _mkpkt(3, 4, []byte("pong"))
t.xverifyPkt(pkt, 1, 2, b("ping"))
pkt = _mkpkt(3, 4, b("pong"))
xsendPkt(nl2, pkt)
okch <- 2
})
@@ -309,7 +329,7 @@ func TestNodeLink(t *testing.T) {
// ---- connections on top of nodelink ----
// Close vs recvPkt
nl1, nl2 = _nodeLinkPipe(0, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(0, linkNoRecvSend)
c = xnewconn(nl1)
wg = xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
@@ -325,14 +345,14 @@ func TestNodeLink(t *testing.T) {
xclose(nl2)
// Close vs sendPkt
nl1, nl2 = _nodeLinkPipe(0, linkNoRecvSend)
nl1, nl2 = t._nodeLinkPipe(0, linkNoRecvSend)
c = xnewconn(nl1)
wg = xsync.NewWorkGroup(bg)
gox(wg, func(_ context.Context) {
tdelay()
xclose(c)
})
pkt = c.mkpkt(0, []byte("data"))
pkt = c.mkpkt(0, b("data"))
err = c.sendPkt(pkt)
if xconnError(err) != ErrClosedConn {
t.Fatalf("Conn.sendPkt() after close: err = %v", err)
@@ -350,7 +370,7 @@ func TestNodeLink(t *testing.T) {
}
})
gox(wg, func(_ context.Context) {
pkt := c12.mkpkt(0, []byte("data"))
pkt := c12.mkpkt(0, b("data"))
err := c12.sendPkt(pkt)
if xconnError(err) != ErrLinkClosed {
exc.Raisef("Conn.sendPkt() after NodeLink close: err = %v", err)
@@ -364,7 +384,7 @@ func TestNodeLink(t *testing.T) {
xclose(nl2)
// NodeLink.Close vs Conn.sendPkt/recvPkt and Accept on another side
nl1, nl2 = _nodeLinkPipe(linkNoRecvSend, 0)
nl1, nl2 = t._nodeLinkPipe(linkNoRecvSend, 0)
c21 := xnewconn(nl2)
c22 := xnewconn(nl2)
c23 := xnewconn(nl2)
@@ -382,7 +402,7 @@ func TestNodeLink(t *testing.T) {
errRecv = cerr
})
gox(wg, func(_ context.Context) {
pkt := c22.mkpkt(0, []byte("data"))
pkt := c22.mkpkt(0, b("data"))
err := c22.sendPkt(pkt)
want := io.ErrClosedPipe // always this in both due to peer close or recvPkt waking up and closing nl2
if xconnError(err) != want {
@@ -419,7 +439,7 @@ func TestNodeLink(t *testing.T) {
if !(pkt == nil && xconnError(err) == errRecv) {
t.Fatalf("Conn.recvPkt 2 after peer NodeLink shutdown: pkt = %v err = %v", pkt, err)
}
err = c23.sendPkt(c23.mkpkt(0, []byte("data")))
err = c23.sendPkt(c23.mkpkt(0, b("data")))
if xconnError(err) != ErrLinkDown {
t.Fatalf("Conn.sendPkt 2 after peer NodeLink shutdown: %v", err)
}
@@ -429,7 +449,7 @@ func TestNodeLink(t *testing.T) {
if !(pkt == nil && xconnError(err) == ErrLinkDown) {
t.Fatalf("Conn.recvPkt after NodeLink shutdown: pkt = %v err = %v", pkt, err)
}
err = c22.sendPkt(c22.mkpkt(0, []byte("data")))
err = c22.sendPkt(c22.mkpkt(0, b("data")))
if xconnError(err) != ErrLinkDown {
t.Fatalf("Conn.sendPkt after NodeLink shutdown: %v", err)
}
@@ -440,7 +460,7 @@ func TestNodeLink(t *testing.T) {
if !(pkt == nil && xconnError(err) == ErrClosedConn) {
t.Fatalf("Conn.recvPkt after close but only stopped NodeLink: pkt = %v err = %v", pkt, err)
}
err = c23.sendPkt(c23.mkpkt(0, []byte("data")))
err = c23.sendPkt(c23.mkpkt(0, b("data")))
if xconnError(err) != ErrClosedConn {
t.Fatalf("Conn.sendPkt after close but only stopped NodeLink: %v", err)
}
@@ -451,7 +471,7 @@ func TestNodeLink(t *testing.T) {
if !(pkt == nil && xconnError(err) == ErrLinkClosed) {
t.Fatalf("Conn.recvPkt after NodeLink shutdown: pkt = %v err = %v", pkt, err)
}
err = c22.sendPkt(c22.mkpkt(0, []byte("data")))
err = c22.sendPkt(c22.mkpkt(0, b("data")))
if xconnError(err) != ErrLinkClosed {
t.Fatalf("Conn.sendPkt after NodeLink shutdown: %v", err)
}
@@ -472,7 +492,7 @@ func TestNodeLink(t *testing.T) {
if !(pkt == nil && xconnError(err) == ErrClosedConn) {
t.Fatalf("Conn.recvPkt after close and NodeLink close: pkt = %v err = %v", pkt, err)
}
err = c22.sendPkt(c22.mkpkt(0, []byte("data")))
err = c22.sendPkt(c22.mkpkt(0, b("data")))
if xconnError(err) != ErrClosedConn {
t.Fatalf("Conn.sendPkt after close and NodeLink close: %v", err)
}
@@ -482,7 +502,7 @@ func TestNodeLink(t *testing.T) {
connKeepClosed = 10 * time.Millisecond
// Conn accept + exchange
nl1, nl2 = nodeLinkPipe()
nl1, nl2 = t.nodeLinkPipe()
nl1.CloseAccept()
wg = xsync.NewWorkGroup(bg)
closed := make(chan int)
@@ -490,15 +510,15 @@ func TestNodeLink(t *testing.T) {
c := xaccept(nl2)
pkt := xrecvPkt(c)
xverifyPkt(pkt, c.connId, 33, []byte("ping"))
t.xverifyPkt(pkt, c.connId, 33, b("ping"))
// change pkt a bit and send it back
xsendPkt(c, c.mkpkt(34, []byte("pong")))
xsendPkt(c, c.mkpkt(34, b("pong")))
// one more time
pkt = xrecvPkt(c)
xverifyPkt(pkt, c.connId, 35, []byte("ping2"))
xsendPkt(c, c.mkpkt(36, []byte("pong2")))
t.xverifyPkt(pkt, c.connId, 35, b("ping2"))
xsendPkt(c, c.mkpkt(36, b("pong2")))
xclose(c)
closed <- 1
@@ -506,52 +526,52 @@ func TestNodeLink(t *testing.T) {
// once again as ^^^ but finish only with CloseRecv
c2 := xaccept(nl2)
pkt = xrecvPkt(c2)
xverifyPkt(pkt, c2.connId, 41, []byte("ping5"))
xsendPkt(c2, c2.mkpkt(42, []byte("pong5")))
t.xverifyPkt(pkt, c2.connId, 41, b("ping5"))
xsendPkt(c2, c2.mkpkt(42, b("pong5")))
c2.CloseRecv()
closed <- 2
// "connection refused" when trying to connect to not-listening peer
c = xnewconn(nl2) // XXX should get error here?
xsendPkt(c, c.mkpkt(38, []byte("pong3")))
xsendPkt(c, c.mkpkt(38, b("pong3")))
pkt = xrecvPkt(c)
xverifyPktMsg(pkt, c.connId, errConnRefused)
xsendPkt(c, c.mkpkt(40, []byte("pong4"))) // once again
t.xverifyPktMsg(pkt, c.connId, errConnRefused)
xsendPkt(c, c.mkpkt(40, b("pong4"))) // once again
pkt = xrecvPkt(c)
xverifyPktMsg(pkt, c.connId, errConnRefused)
t.xverifyPktMsg(pkt, c.connId, errConnRefused)
xclose(c)
})
c1 := xnewconn(nl1)
xsendPkt(c1, c1.mkpkt(33, []byte("ping")))
xsendPkt(c1, c1.mkpkt(33, b("ping")))
pkt = xrecvPkt(c1)
xverifyPkt(pkt, c1.connId, 34, []byte("pong"))
xsendPkt(c1, c1.mkpkt(35, []byte("ping2")))
t.xverifyPkt(pkt, c1.connId, 34, b("pong"))
xsendPkt(c1, c1.mkpkt(35, b("ping2")))
pkt = xrecvPkt(c1)
xverifyPkt(pkt, c1.connId, 36, []byte("pong2"))
t.xverifyPkt(pkt, c1.connId, 36, b("pong2"))
// "connection closed" after peer closed its end
<-closed
xsendPkt(c1, c1.mkpkt(37, []byte("ping3")))
xsendPkt(c1, c1.mkpkt(37, b("ping3")))
pkt = xrecvPkt(c1)
xverifyPktMsg(pkt, c1.connId, errConnClosed)
xsendPkt(c1, c1.mkpkt(39, []byte("ping4"))) // once again
t.xverifyPktMsg(pkt, c1.connId, errConnClosed)
xsendPkt(c1, c1.mkpkt(39, b("ping4"))) // once again
pkt = xrecvPkt(c1)
xverifyPktMsg(pkt, c1.connId, errConnClosed)
t.xverifyPktMsg(pkt, c1.connId, errConnClosed)
// XXX also should get EOF on recv
// one more time but now peer does only .CloseRecv()
c2 := xnewconn(nl1)
xsendPkt(c2, c2.mkpkt(41, []byte("ping5")))
xsendPkt(c2, c2.mkpkt(41, b("ping5")))
pkt = xrecvPkt(c2)
xverifyPkt(pkt, c2.connId, 42, []byte("pong5"))
t.xverifyPkt(pkt, c2.connId, 42, b("pong5"))
<-closed
xsendPkt(c2, c2.mkpkt(41, []byte("ping6")))
xsendPkt(c2, c2.mkpkt(41, b("ping6")))
pkt = xrecvPkt(c2)
xverifyPktMsg(pkt, c2.connId, errConnClosed)
t.xverifyPktMsg(pkt, c2.connId, errConnClosed)
xwait(wg)
@@ -577,7 +597,7 @@ func TestNodeLink(t *testing.T) {
connKeepClosed = saveKeepClosed
// test 2 channels with replies coming in reversed time order
nl1, nl2 = nodeLinkPipe()
nl1, nl2 = t.nodeLinkPipe()
wg = xsync.NewWorkGroup(bg)
replyOrder := map[uint16]struct { // "order" in which to process requests
start chan struct{} // processing starts when start chan is ready
@@ -613,13 +633,13 @@ func TestNodeLink(t *testing.T) {
c1 = xnewconn(nl1)
c2 = xnewconn(nl1)
xsendPkt(c1, c1.mkpkt(1, []byte("")))
xsendPkt(c2, c2.mkpkt(2, []byte("")))
xsendPkt(c1, c1.mkpkt(1, b("")))
xsendPkt(c2, c2.mkpkt(2, b("")))
// replies must be coming in reverse order
xechoWait := func(c *Conn, msgCode uint16) {
pkt := xrecvPkt(c)
xverifyPkt(pkt, c.connId, msgCode, []byte(""))
t.xverifyPkt(pkt, c.connId, msgCode, b(""))
}
xechoWait(c2, 2)
xechoWait(c1, 1)
@@ -663,10 +683,13 @@ func xverifyMsg(msg1, msg2 proto.Msg) {
}
func TestRecv1Mode(t *testing.T) {
Verify(t, _TestRecv1Mode)
}
func _TestRecv1Mode(t *T) {
bg := context.Background()
// Send1
nl1, nl2 := nodeLinkPipe()
nl1, nl2 := t.nodeLinkPipe()
wg := xsync.NewWorkGroup(bg)
sync := make(chan int)
gox(wg, func(_ context.Context) {
@@ -730,7 +753,10 @@ func TestRecv1Mode(t *testing.T) {
//
// bug triggers under -race.
func TestLightCloseVsLinkShutdown(t *testing.T) {
nl1, nl2 := nodeLinkPipe()
Verify(t, _TestLightCloseVsLinkShutdown)
}
func _TestLightCloseVsLinkShutdown(t *T) {
nl1, nl2 := t.nodeLinkPipe()
wg := xsync.NewWorkGroup(context.Background())
c := xnewconn(nl1)
......
@@ -43,6 +43,9 @@ func _xhandshakeServer(ctx context.Context, c net.Conn, version uint32) {
}
func TestHandshake(t *testing.T) {
Verify(t, _TestHandshake)
}
func _TestHandshake(t *T) {
bg := context.Background()
// handshake ok
p1, p2 := net.Pipe()
......