// Given a node, construct a Peer with the same properties. func NewPeerFromNode(node *Node) (p *Peer, err error) { if node == nil { err = NilNode } else { id := node.GetNodeID().Value() nodeID, err := xi.New(id) if err == nil { var o []xo.OverlayI for i := 0; i < node.SizeOverlays(); i++ { o = append(o, node.GetOverlay(i)) } var ctors []xt.ConnectorI for i := 0; i < node.SizeAcceptors(); i++ { var ctor *xt.TcpConnector ep := node.GetAcceptor(i).GetEndPoint() ctor, err = xt.NewTcpConnector(ep) if err != nil { break } ctors = append(ctors, ctor) } if err == nil { p, err = NewPeer(node.GetName(), nodeID, node.GetCommsPublicKey(), node.GetSigPublicKey(), o, ctors) } } } return }
func NewMemberInfoFromToken(token *XLRegMsg_Token) ( m *xcl.MemberInfo, err error) { var ( ck, sk *rsa.PublicKey ctor xt.ConnectorI ctors []xt.ConnectorI farEnd xt.EndPointI nodeID *xi.NodeID peer *xn.Peer ) if token == nil { err = NilToken } else { nodeID, err = xi.New(token.GetID()) if err == nil { ck, err = xc.RSAPubKeyFromWire(token.GetCommsKey()) if err == nil { sk, err = xc.RSAPubKeyFromWire(token.GetSigKey()) if err == nil { attrs := token.GetAttrs() myEnds := token.GetMyEnds() for i := 0; i < len(myEnds); i++ { myEnd := myEnds[i] farEnd, err = xt.NewTcpEndPoint(myEnd) if err != nil { break } ctor, err = xt.NewTcpConnector(farEnd) if err != nil { break } ctors = append(ctors, ctor) } if err == nil { peer, err = xn.NewPeer(token.GetName(), nodeID, ck, sk, nil, ctors) if err == nil { m = &xcl.MemberInfo{ Attrs: attrs, Peer: peer, } } } } //if err == nil { // m, err = NewMemberInfo(token.GetName(), nodeID, // ck, sk, token.GetAttrs(), token.GetMyEnds()) //} } } } return }
// Create a Peer from information in the Node passed. Endpoints // (and so Overlays) must have already been added to the Node. func (s *XLSuite) peerFromHost(c *C, n *Node) (peer *Peer) { var err error k := len(n.endPoints) ctors := make([]xt.ConnectorI, k) for i := 0; i < k; i++ { ctors[i], err = xt.NewTcpConnector(n.GetEndPoint(i)) c.Assert(err, Equals, nil) } peer = &Peer{connectors: ctors, BaseNode: n.BaseNode} //peer.commsPubKey = n.GetCommsPublicKey() //peer.sigPubKey = n.GetSigPublicKey() return peer }
func (upc *UpaxClient) SessionSetup(proposedVersion uint32) ( upcx *xt.TcpConnection, decidedVersion uint32, err error) { var ( ciphertext1, ciphertext2 []byte cOneShot, cSession *xa.AesSession ) rng := xr.MakeSystemRNG() // Set up connection to server. ----------------------------- ctor, err := xt.NewTcpConnector(upc.serverEnd) if err == nil { var conn xt.ConnectionI conn, err = ctor.Connect(nil) if err == nil { upcx = conn.(*xt.TcpConnection) } } // Send HELLO ----------------------------------------------- if err == nil { upc.Cnx = upcx cOneShot, ciphertext1, err = xa.ClientEncryptHello( proposedVersion, upc.serverCK, rng) } if err == nil { err = upc.WriteData(ciphertext1) } // Process HELLO REPLY -------------------------------------- if err == nil { ciphertext2, err = upc.ReadData() } if err == nil { cSession, decidedVersion, err = xa.ClientDecryptHelloReply( cOneShot, ciphertext2) } // Set up AES engines --------------------------------------- if err == nil { upc.AesSession = *cSession upc.Version = xu.DecimalVersion(decidedVersion) } return }
func (s *XLSuite) TestMockLocalHostTcpCluster(c *C) { if VERBOSITY > 0 { fmt.Println("TEST_MOCK_LOCAL_HOST_TCP_CLUSTER") } var err error const K = 5 nodes, accs := MockLocalHostCluster(K) defer func() { for i := 0; i < K; i++ { if accs[i] != nil { accs[i].Close() } } }() for i := 0; i < K; i++ { c.Assert(nodes, Not(IsNil)) c.Assert(accs, Not(IsNil)) } nameSet := make(map[string]bool) names := make([]string, K) nodeIDs := make([]*xi.NodeID, K) for i := 0; i < K; i++ { names[i] = nodes[i].GetName() _, ok := nameSet[names[i]] c.Assert(ok, Equals, false) nameSet[names[i]] = true // XXX should also verify nodeIDs are unique nodeIDs[i] = nodes[i].GetNodeID() } ar, err := xo.NewCIDRAddrRange("127.0.0.0/8") c.Assert(err, Equals, nil) overlay, err := xo.NewIPOverlay("XO", ar, "tcp", 1.0) c.Assert(err, Equals, nil) _ = overlay accEndPoints := make([]*xt.TcpEndPoint, K) for i := 0; i < K; i++ { accEndPoints[i] = accs[i].GetEndPoint().(*xt.TcpEndPoint) c.Assert(accEndPoints[i], Not(IsNil)) c.Assert(nodes[i].SizeEndPoints(), Equals, 1) c.Assert(nodes[i].SizeAcceptors(), Equals, 1) c.Assert(nodes[i].SizeOverlays(), Equals, 1) c.Assert(overlay.Equal(nodes[i].GetOverlay(0)), Equals, true) } // XXX NEEDS CHECKING FROM HERE commsKeys := make([]*rsa.PublicKey, K) sigKeys := make([]*rsa.PublicKey, K) ctors := make([]*xt.TcpConnector, K) for i := 0; i < K; i++ { commsKeys[i] = nodes[i].GetCommsPublicKey() sigKeys[i] = nodes[i].GetSigPublicKey() ctors[i], err = xt.NewTcpConnector(accEndPoints[i]) c.Assert(err, Equals, nil) } //overlaySlice := []xo.OverlayI{overlay} // peers := make([]*Peer, K) for i := 0; i < K; i++ { //ctorSlice := []xt.ConnectorI{ctors[i]} //_ = ctorSlice //peers[i], err = NewPeer(names[i], nodeIDs[i], commsKeys[i], sigKeys[i], // overlaySlice, ctorSlice) //c.Assert(err, Equals, nil) } // Use the information collected to configure each node. 
for i := 0; i < K; i++ { //for j := 0; j < K; j++ { // if i != j { // ndx, err := nodes[i].AddPeer(peers[j]) // c.Assert(err, Equals, nil) // var expectedNdx int // if j < i { // expectedNdx = j // } else { // expectedNdx = j - 1 // } // c.Assert(ndx, Equals, expectedNdx) // } //} // GEEP c.Assert(nodes[i].SizeAcceptors(), Equals, 1) // XXX WRONG APPROACH - SizeConnectors() is a Peer // function, and in this case should return 1 for each peer. // c.Assert(nodes[i].SizeConnectors(),Equals, K-1) c.Assert(nodes[i].SizeEndPoints(), Equals, 1) c.Assert(nodes[i].SizeOverlays(), Equals, 1) c.Assert(nodes[i].SizePeers(), Equals, K-1) } // GEEP }
// TestHelloHandler drives the server-side InHandler through a full
// conversation: a well-formed Hello from a known peer, a KeepAlive,
// and a Bye, asserting the Ack message/sequence numbers at each step.
func (s *XLSuite) TestHelloHandler(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("TEST_HELLO_HANDLER")
	}
	// Create a node and add a mock peer. This is a cluster of 2.
	nodes, accs := xn.MockLocalHostCluster(2)
	defer func() {
		for i := 0; i < 2; i++ {
			if accs[i] != nil {
				accs[i].Close()
			}
		}
	}()
	myNode, peerNode := nodes[0], nodes[1]
	meAsPeer := peerNode.GetPeer(0)
	myAcc, peerAcc := accs[0], accs[1]
	_ = peerAcc // never used
	c.Assert(myAcc, Not(IsNil))
	myAccEP := myAcc.GetEndPoint()
	myCtor, err := xt.NewTcpConnector(myAccEP)
	c.Assert(err, IsNil)

	// myNode's server side: accept connections until the acceptor is
	// closed (Accept then returns an error and the loop exits).
	stopCh := make(chan bool, 1) // has buffer so won't block
	stoppedCh := make(chan bool, 1)
	go func() {
		for {
			cnx, err := myAcc.Accept()
			if err != nil {
				break
			}
			// each connection handled by a separate goroutine
			go func() {
				_, _ = NewInHandler(myNode, cnx, stopCh, stoppedCh)
			}()
		}
	}()

	// -- WELL-FORMED HELLO -----------------------------------------
	// Known peer sends Hello with all parameters correct. We reply
	// with an Ack and advance state to open.
	conn, err := myCtor.Connect(xt.ANY_TCP_END_POINT)
	c.Assert(err, IsNil)
	c.Assert(conn, Not(IsNil))
	cnx2 := conn.(*xt.TcpConnection)
	defer cnx2.Close()
	oh := &OutHandler{
		Node:       peerNode,
		CnxHandler: CnxHandler{Cnx: cnx2, Peer: meAsPeer}}

	// manually create and send a hello message -
	// XXX HELLO_MSG IS OBSOLETE; it's done with RSA/AES handshake
	peerHello, err := MakeHelloMsg(peerNode)
	c.Assert(err, IsNil)
	c.Assert(peerHello, Not(IsNil))
	data, err := EncodePacket(peerHello)
	c.Assert(err, IsNil)
	c.Assert(data, Not(IsNil))
	count, err := cnx2.Write(data)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(data))
	oh.MsgN = ONE
	// end manual hello -------------------------

	time.Sleep(100 * time.Millisecond) // wait for ack
	ack, err := oh.readMsg()
	c.Assert(err, IsNil)
	c.Assert(ack, Not(IsNil))
	// verify msg returned is an ack and has the correct parameters
	c.Assert(ack.GetOp(), Equals, XLatticeMsg_Ack)
	c.Assert(ack.GetMsgN(), Equals, TWO)
	c.Assert(ack.GetYourMsgN(), Equals, ONE) // FOO

	// -- KEEPALIVE -------------------------------------------------
	cmd := XLatticeMsg_KeepAlive
	keepAlive := &XLatticeMsg{
		Op:   &cmd,
		MsgN: &THREE,
	}
	data, err = EncodePacket(keepAlive)
	c.Assert(err, IsNil)
	c.Assert(data, Not(IsNil))
	count, err = cnx2.Write(data)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(data))

	// Wait for ack. In a better world we time out if an ack is not
	// received in some short period rather than blocking forever.
	ack, err = oh.readMsg()
	c.Assert(err, IsNil)
	c.Assert(ack, Not(IsNil))
	// verify msg returned is an ack and has the correct parameters
	c.Assert(ack.GetOp(), Equals, XLatticeMsg_Ack)
	c.Assert(ack.GetMsgN(), Equals, FOUR)
	c.Assert(ack.GetYourMsgN(), Equals, THREE)

	// -- BYE -------------------------------------------------------
	cmd = XLatticeMsg_Bye
	bye := &XLatticeMsg{
		Op:   &cmd,
		MsgN: &FIVE,
	}
	data, err = EncodePacket(bye)
	c.Assert(err, IsNil)
	c.Assert(data, Not(IsNil))
	count, err = cnx2.Write(data)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(data))

	// Wait for ack. In a better world we time out if an ack is not
	// received in some short period rather than blocking forever.
	ack, err = oh.readMsg()
	c.Assert(err, IsNil)
	c.Assert(ack, Not(IsNil))
	// verify msg returned is an ack and has the correct parameters
	c.Assert(ack.GetOp(), Equals, XLatticeMsg_Ack)
	c.Assert(ack.GetMsgN(), Equals, SIX)
	c.Assert(ack.GetYourMsgN(), Equals, FIVE)

	// -- STOP THE SERVER -------------------------------------------
	stopCh <- true
	select {
	case <-stoppedCh:
	case <-time.After(100 * time.Millisecond):
	}
} // END HANDLER
// TestSecondHello verifies that after a successful Hello/Ack exchange,
// sending a second Hello on the same connection is rejected by the
// server with an Error message.
func (s *XLSuite) TestSecondHello(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("TEST_SECOND_HELLO")
	}
	// Create a node and add a mock peer. This is a cluster of 2.
	nodes, accs := xn.MockLocalHostCluster(2)
	defer func() {
		for i := 0; i < 2; i++ {
			if accs[i] != nil {
				accs[i].Close()
			}
		}
	}()
	serverNode, clientNode := nodes[0], nodes[1]
	serverAsPeer := clientNode.GetPeer(0)
	serverAcc := accs[0]
	c.Assert(serverAcc, Not(IsNil))
	serverAccEP := serverAcc.GetEndPoint()
	serverCtor, err := xt.NewTcpConnector(serverAccEP)
	c.Assert(err, IsNil)

	// serverNode's server side
	stopCh := make(chan bool, 1)
	stoppedCh := make(chan bool, 1)
	// XXX If you comment out this goroutine, there are no mysterious
	// failures.
	go func() {
		for {
			cnx, err := serverAcc.Accept()
			// ADDING THIS ELIMINATES MYSTERY FAILURES
			if err != nil {
				break
			}
			// each connection handled by a separate goroutine
			go func() {
				_, _ = NewInHandler(serverNode, cnx, stopCh, stoppedCh)
			}()
		}
	}() // END FUNC

	// -- WELL-FORMED HELLO -----------------------------------------
	// Known peer sends Hello with all parameters correct. Server
	// replies with an Ack and advance state to open.
	conn, err := serverCtor.Connect(xt.ANY_TCP_END_POINT)
	c.Assert(err, IsNil)
	c.Assert(conn, Not(IsNil))
	cnx2 := conn.(*xt.TcpConnection)
	defer cnx2.Close()
	oh := &OutHandler{Node: clientNode,
		CnxHandler: CnxHandler{Cnx: cnx2, Peer: serverAsPeer}}
	err = oh.SendHello()
	c.Assert(err, IsNil)

	// wait for ack
	ack, err := oh.readMsg()
	c.Assert(err, IsNil) // XXX "EOF" instead
	c.Assert(ack, Not(IsNil))
	// verify msg returned is an ack and has the correct parameters
	c.Assert(ack.GetOp(), Equals, XLatticeMsg_Ack)
	c.Assert(ack.GetMsgN(), Equals, TWO)
	c.Assert(ack.GetYourMsgN(), Equals, ONE) // FOO

	// -- SECOND WELL-FORMED HELLO ----------------------------------
	// manually create and send a hello message -
	// XXX HELLO_MSG IS OBSOLETE; it's done with RSA/AES handshake
	peerHello, err := MakeHelloMsg(clientNode)
	c.Assert(err, IsNil)
	c.Assert(peerHello, Not(IsNil))
	data, err := EncodePacket(peerHello)
	c.Assert(err, IsNil)
	c.Assert(data, Not(IsNil))
	count, err := cnx2.Write(data)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(data))
	oh.MsgN = ONE
	// end manual hello -------------------------

	// wait for error message
	reply, err := oh.readMsg()
	c.Assert(err, IsNil)
	c.Assert(reply, Not(IsNil))
	// verify msg returned is an reply and has the correct parameters
	c.Assert(reply.GetOp(), Equals, XLatticeMsg_Error)
	c.Assert(reply.GetMsgN(), Equals, FOUR)

	// -- STOP THE SERVER -------------------------------------------
	stopCh <- true
	select {
	case <-stoppedCh:
	case <-time.After(100 * time.Millisecond):
	}
}
// TestHelloFromStranger verifies that a Hello from a node unknown to
// the server is silently dropped: the server sends no reply and should
// simply close the connection.
func (s *XLSuite) TestHelloFromStranger(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("TEST_HELLO_FROM_STRANGER")
	}
	myNode, myAcc := s.makeANode(c)
	defer myAcc.Close()
	c.Assert(myAcc, Not(IsNil))
	myAccEP := myAcc.GetEndPoint()
	myCtor, err := xt.NewTcpConnector(myAccEP)
	c.Assert(err, IsNil)

	// myNode's server side
	stopCh := make(chan bool, 1)
	stoppedCh := make(chan bool, 1)
	go func() {
		for {
			cnx, err := myAcc.Accept()
			if err != nil {
				break
			}
			c.Assert(err, IsNil)
			// each connection handled by a separate goroutine
			go func() {
				_, _ = NewInHandler(myNode, cnx, stopCh, stoppedCh)
			}()
		}
	}()

	// Create a second mock peer unknown to myNode.
	badGuy, badAcc := s.makeANode(c)
	defer badAcc.Close()
	// XXX HELLO_MSG IS OBSOLETE; it's done with RSA/AES handshake
	badHello, err := MakeHelloMsg(badGuy)
	c.Assert(err, IsNil)
	c.Assert(badHello, Not(IsNil))
	time.Sleep(100 * time.Millisecond)

	// Unknown peer sends Hello. Test node should just drop the
	// connection. It is an error if we receive a reply.
	conn, err := myCtor.Connect(xt.ANY_TCP_END_POINT)
	c.Assert(err, IsNil)
	c.Assert(conn, Not(IsNil))
	cnx := conn.(*xt.TcpConnection)
	defer cnx.Close()
	data, err := EncodePacket(badHello)
	c.Assert(err, IsNil)
	c.Assert(data, Not(IsNil))
	count, err := cnx.Write(data)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, len(data))
	time.Sleep(100 * time.Millisecond)
	// XXX THIS TEST FAILS because of a deficiency in
	// transport/tcp_connection.GetState() - it does not look at
	// the state of the underlying connection
	// c.Assert(cnx.GetState(), Equals, xt.DISCONNECTED)

	// -- STOP THE SERVER -------------------------------------------
	stopCh <- true
	select {
	case <-stoppedCh:
	case <-time.After(100 * time.Millisecond):
	}
}
// TestPeerSerialization round-trips a Peer through its string form:
// it builds a Peer, constructs the expected serialization by hand,
// compares it with Peer.String(), then parses it back with ParsePeer
// and checks the re-serialization matches.
func (s *XLSuite) TestPeerSerialization(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("TEST_PEER_SERIALIZATION")
	}
	rng := xr.MakeSimpleRNG()

	// this is just a lazy way of building a peer
	name := rng.NextFileName(4)
	nid, err := makeNodeID(rng)
	c.Assert(err, Equals, nil)
	lfs := "tmp/" + hex.EncodeToString(nid.Value())
	node, err := NewNew(name, nid, lfs)
	c.Assert(err, Equals, nil)

	// harvest its keys
	ck := &node.ckPriv.PublicKey
	ckPEM, err := xc.RSAPubKeyToPEM(ck)
	c.Assert(err, Equals, nil)
	sk := &node.skPriv.PublicKey
	skPEM, err := xc.RSAPubKeyToPEM(sk)
	c.Assert(err, Equals, nil)

	// the other bits necessary: endpoint, connector, overlay
	port := 1024 + rng.Intn(1024)
	addr := fmt.Sprintf("1.2.3.4:%d", port)
	ep, err := xt.NewTcpEndPoint(addr)
	c.Assert(err, Equals, nil)
	ctor, err := xt.NewTcpConnector(ep)
	c.Assert(err, Equals, nil)
	overlay, err := xo.DefaultOverlay(ep)
	c.Assert(err, Equals, nil)
	oSlice := []xo.OverlayI{overlay}
	ctorSlice := []xt.ConnectorI{ctor}

	peer, err := NewPeer(name, nid, ck, sk, oSlice, ctorSlice)
	c.Assert(err, Equals, nil)
	c.Assert(peer, Not(Equals), nil)

	// build the expected serialization
	// BaseNode
	var bns []string
	s.addAString(&bns, "peer {")
	s.addAString(&bns, fmt.Sprintf(" name: %s", name))
	s.addAString(&bns, fmt.Sprintf(" nodeID: %s", nid.String()))
	s.addAString(&bns, fmt.Sprintf(" commsPubKey: %s", ckPEM))
	s.addAString(&bns, fmt.Sprintf(" sigPubKey: %s", skPEM))
	s.addAString(&bns, fmt.Sprintf(" overlays {"))
	for i := 0; i < len(oSlice); i++ {
		s.addAString(&bns, fmt.Sprintf(" %s", oSlice[i].String()))
	}
	s.addAString(&bns, fmt.Sprintf(" }"))
	// Specific to Peer
	s.addAString(&bns, fmt.Sprintf(" connectors {"))
	for i := 0; i < len(ctorSlice); i++ {
		s.addAString(&bns, fmt.Sprintf(" %s", ctorSlice[i].String()))
	}
	s.addAString(&bns, fmt.Sprintf(" }")) // closes connectors
	s.addAString(&bns, fmt.Sprintf("}"))  // closes peer
	myVersion := strings.Join(bns, "\n")

	// the Peer must serialize to exactly the expected text
	serialized := peer.String()
	c.Assert(serialized, Equals, myVersion)

	// parse it back and verify the round trip is lossless
	backAgain, rest, err := ParsePeer(serialized)
	c.Assert(err, Equals, nil)
	c.Assert(len(rest), Equals, 0)
	reserialized := backAgain.String()
	c.Assert(reserialized, Equals, serialized)
}
// Given contact information for a registry and the name of a cluster, // the client joins the cluster, collects information on the other members, // and terminates when it has info on the entire membership. func NewMemberMaker( node *xn.Node, attrs uint64, regName string, regID *xi.NodeID, regEnd xt.EndPointI, regCK, regSK *rsa.PublicKey, clusterName string, clusterAttrs uint64, clusterID *xi.NodeID, size, epCount uint32, endPoints []xt.EndPointI) ( mm *MemberMaker, err error) { var ( cm *xcl.ClusterMember isAdmin = (attrs & xcl.ATTR_ADMIN) != 0 regPeer *xn.Peer ) // sanity checks on parameter list if node == nil { err = MissingNode } else { if regName == "" || regID == nil || regEnd == nil || regCK == nil { err = MissingServerInfo } if err == nil { // DEBUG fmt.Printf("NemMemberMaker: regEnd is %s\n", regEnd.String()) // END if (attrs & xcl.ATTR_SOLO) == uint64(0) { if clusterName == "" { err = MissingClusterNameOrID if err == nil && size < uint32(1) { // err = ClusterMustHaveTwo err = ClusterMustHaveMember } } if err == nil { // if the client is an admin client epCount applies // to the cluster if epCount < uint32(1) { epCount = uint32(1) } if !isAdmin { // XXX There is some confusion here: we don't require // that all members have the same number of endpoints actualEPCount := uint32(len(endPoints)) if actualEPCount == 0 { err = MemberMustHaveEndPoint } else if epCount > actualEPCount { epCount = actualEPCount } for i := 0; i < int(epCount); i++ { _, err = node.AddEndPoint(endPoints[i]) } } } } } } if err == nil { var ctor xt.ConnectorI var ctors []xt.ConnectorI ctor, err = xt.NewTcpConnector(regEnd) if err == nil { ctors = append(ctors, ctor) regPeer, err = xn.NewPeer(regName, regID, regCK, regSK, nil, ctors) if err == nil { _, err = node.AddPeer(regPeer) } } } if err == nil { cm = &xcl.ClusterMember{ // Attrs gets negotiated ClusterName: clusterName, ClusterAttrs: clusterAttrs, ClusterID: clusterID, ClusterMaxSize: size, EPCount: epCount, // Members added on 
the fly Members: make([]*xcl.MemberInfo, size), Node: *node, } mm = &MemberMaker{ ProposedAttrs: attrs, DoneCh: make(chan error, 1), RegPeer: regPeer, ClusterMember: *cm, } } return }
func MockLocalHostCluster(K int) (nodes []*Node, accs []*xt.TcpAcceptor) { rng := xr.MakeSimpleRNG() // Create K nodes, each with a NodeID, two RSA private keys (sig and // comms), and two RSA public keys. Each node creates a TcpAcceptor // running on 127.0.0.1 and a random (= system-supplied) port. names := make([]string, K) nodeIDs := make([]*xi.NodeID, K) for i := 0; i < K; i++ { // TODO: MAKE NAMES UNIQUE names[i] = rng.NextFileName(4) val := make([]byte, xu.SHA1_BIN_LEN) rng.NextBytes(val) nodeIDs[i], _ = xi.NewNodeID(val) } nodes = make([]*Node, K) accs = make([]*xt.TcpAcceptor, K) accEndPoints := make([]*xt.TcpEndPoint, K) for i := 0; i < K; i++ { lfs := "tmp/" + hex.EncodeToString(nodeIDs[i].Value()) nodes[i], _ = NewNew(names[i], nodeIDs[i], lfs) } // XXX We need this functionality in using code // defer func() { // for i := 0; i < K; i++ { // if accs[i] != nil { // accs[i].CloseAcc() // } // } // }() // Collect the nodeID, public keys, and listening address from each // node. // all nodes on the same overlay ar, _ := xo.NewCIDRAddrRange("127.0.0.0/8") overlay, _ := xo.NewIPOverlay("XO", ar, "tcp", 1.0) // add an endpoint to each node for i := 0; i < K; i++ { ep, _ := xt.NewTcpEndPoint("127.0.0.1:0") nodes[i].AddEndPoint(ep) nodes[i].OpenAcc() // XXX POSSIBLE ERRORS IGNORED accs[i] = nodes[i].GetAcceptor(0).(*xt.TcpAcceptor) accEndPoints[i] = accs[i].GetEndPoint().(*xt.TcpEndPoint) } ckPrivs := make([]*rsa.PublicKey, K) skPrivs := make([]*rsa.PublicKey, K) ctors := make([]*xt.TcpConnector, K) for i := 0; i < K; i++ { // we already have nodeIDs ckPrivs[i] = nodes[i].GetCommsPublicKey() skPrivs[i] = nodes[i].GetSigPublicKey() ctors[i], _ = xt.NewTcpConnector(accEndPoints[i]) } overlaySlice := []xo.OverlayI{overlay} peers := make([]*Peer, K) for i := 0; i < K; i++ { ctorSlice := []xt.ConnectorI{ctors[i]} _ = ctorSlice peers[i], _ = NewPeer(names[i], nodeIDs[i], ckPrivs[i], skPrivs[i], overlaySlice, ctorSlice) } // Use the information collected to configure 
each node. for i := 0; i < K; i++ { for j := 0; j < K; j++ { if i != j { nodes[i].AddPeer(peers[j]) } } } return }