Example #1
func (s *XLSuite) makeALocalEndPoint(c *C, node *xn.Node) {
	addr := "127.0.0.1:0"
	ep, err := xt.NewTcpEndPoint(addr)
	c.Assert(err, IsNil)
	c.Assert(ep, Not(IsNil))
	ndx, err := node.AddEndPoint(ep)
	c.Assert(err, IsNil)
	c.Assert(ndx, Equals, 0) // it's the only one
}
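The "127.0.0.1:0" endpoint address asks the operating system for an ephemeral port. A minimal standard-library sketch of the same idea, independent of the xt package:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Port 0 tells the kernel to pick any free port.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer listener.Close()
	// The bound address now carries the system-assigned port, which is
	// what AddEndPoint/OpenAcc recover in the examples below.
	fmt.Println("listening on", listener.Addr().String())
}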
Example #2
func NewMemberInfoFromToken(token *XLRegMsg_Token) (
	m *xcl.MemberInfo, err error) {

	var (
		ck, sk *rsa.PublicKey
		ctor   xt.ConnectorI
		ctors  []xt.ConnectorI
		farEnd xt.EndPointI
		nodeID *xi.NodeID
		peer   *xn.Peer
	)
	if token == nil {
		err = NilToken
	} else {
		nodeID, err = xi.New(token.GetID())
		if err == nil {
			ck, err = xc.RSAPubKeyFromWire(token.GetCommsKey())
			if err == nil {
				sk, err = xc.RSAPubKeyFromWire(token.GetSigKey())
				if err == nil {
					attrs := token.GetAttrs()
					myEnds := token.GetMyEnds()
					for i := 0; i < len(myEnds); i++ {
						myEnd := myEnds[i]
						farEnd, err = xt.NewTcpEndPoint(myEnd)
						if err != nil {
							break
						}
						ctor, err = xt.NewTcpConnector(farEnd)
						if err != nil {
							break
						}
						ctors = append(ctors, ctor)
					}
					if err == nil {
						peer, err = xn.NewPeer(token.GetName(), nodeID,
							ck, sk, nil, ctors)
						if err == nil {
							m = &xcl.MemberInfo{
								Attrs: attrs,
								Peer:  peer,
							}
						}
					}
				}
			}
		}
	}
	return
}
Example #3
func (s *XLSuite) shouldGetDefault(c *C, addr string) OverlayI {
	e, err := xt.NewTcpEndPoint(addr)
	c.Assert(err, Equals, nil)
	c.Assert(e, Not(Equals), nil)

	o, err := DefaultOverlay(e)
	c.Assert(err, Equals, nil)
	c.Assert(o.Name(), Not(Equals), "") // NPE?
	c.Assert(o.Transport(), Equals, "ip")
	c.Assert(o.Cost(), Equals, float32(1.0))
	return o
}
Example #4
func NewMockUpaxClient(name, lfs string, members []*xcl.MemberInfo,
	primary uint) (mc *MockUpaxClient, err error) {
	var (
		ckPriv, skPriv *rsa.PrivateKey
		ep             []xt.EndPointI
		node           *xn.Node
		uc             *UpaxClient
	)

	// lfs should be a well-formed POSIX path; if the directory does
	// not exist we should create it.
	err = xf.CheckLFS(lfs, 0750)

	// The ckPriv is an RSA key used to encrypt short messages.
	if err == nil {
		if ckPriv == nil {
			ckPriv, err = rsa.GenerateKey(rand.Reader, 2048)
		}
		if err == nil {
			// The skPriv is an RSA key used to create digital signatures.
			if skPriv == nil {
				skPriv, err = rsa.GenerateKey(rand.Reader, 2048)
			}
		}
	}
	// The mock client uses a system-assigned endpoint
	if err == nil {
		var endPoint *xt.TcpEndPoint
		endPoint, err = xt.NewTcpEndPoint("127.0.0.1:0")
		if err == nil {
			ep = []xt.EndPointI{endPoint}
		}
	}
	// spin up an XLattice node
	if err == nil {
		node, err = xn.New(name, nil, // get a default NodeID
			lfs, ckPriv, skPriv, nil, ep, nil) // nil overlays, peers
	}
	if err == nil {
		uc, err = NewUpaxClient(ckPriv, skPriv, node, members, primary)
		if err == nil {
			mc = &MockUpaxClient{
				UpaxClient: *uc,
			}
		}
	}
	return
}
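The two comments above distinguish a comms key (used to encrypt short messages) from a sig key (used for digital signatures). A standalone sketch of generating such a pair with the standard library, as NewMockUpaxClient does when the keys are nil:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// One key for encrypting short messages (comms)...
	ckPriv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// ...and a separate key for digital signatures (sig).
	skPriv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	fmt.Println(ckPriv.PublicKey.N.BitLen(), skPriv.PublicKey.N.BitLen())
}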
Example #5
// same test using NewCIDRAddrRange()
func (s *XLSuite) TestIsElement2(c *C) {
	rng := xr.MakeSimpleRNG()
	name := rng.NextFileName(8)
	a10_8, err := NewCIDRAddrRange("10.0.0.0/8")
	c.Assert(err, IsNil)
	o10_8, err := NewIPOverlay(name, a10_8, "ip", 1.0)
	c.Assert(err, IsNil)

	// bad transport(s)
	mockE := xt.NewMockEndPoint("foo", "1234")
	c.Assert(o10_8.IsElement(mockE), Equals, false)

	// 10/8 ---------------------------------------------------------
	c.Assert(a10_8.PrefixLen(), Equals, uint(8))
	c.Assert(a10_8.AddrLen(), Equals, uint(32))
	prefix := a10_8.Prefix()
	c.Assert(prefix[0], Equals, byte(10))

	e1, err := xt.NewTcpEndPoint("10.11.12.13:55555")
	c.Assert(err, IsNil)
	c.Assert(e1, Not(IsNil))
	c.Assert(o10_8.IsElement(e1), Equals, true)
	e2, err := xt.NewTcpEndPoint("9.10.11.12:4444")
	c.Assert(err, IsNil)
	c.Assert(e2, Not(IsNil))
	c.Assert(o10_8.IsElement(e2), Equals, false)

	// 192.168/16 ---------------------------------------------------
	a192_168, err := NewCIDRAddrRange("192.168.0.0/16")
	c.Assert(err, IsNil)
	o192_168, err := NewIPOverlay(name, a192_168, "ip", 1.0)
	c.Assert(err, IsNil)
	c.Assert(a192_168.PrefixLen(), Equals, uint(16))
	c.Assert(a192_168.AddrLen(), Equals, uint(32))
	prefix = a192_168.Prefix()
	c.Assert(prefix[0], Equals, byte(192))
	c.Assert(prefix[1], Equals, byte(168))

	e10, err := xt.NewTcpEndPoint("192.168.0.0:1")
	c.Assert(err, IsNil)
	c.Assert(o192_168.IsElement(e10), Equals, true)
	e11, err := xt.NewTcpEndPoint("192.168.255.255:2")
	c.Assert(err, IsNil)
	c.Assert(o192_168.IsElement(e11), Equals, true)
	e20, err := xt.NewTcpEndPoint("192.167.255.255:3")
	c.Assert(err, IsNil)
	c.Assert(o192_168.IsElement(e20), Equals, false)
	e21, err := xt.NewTcpEndPoint("192.169.0.0:4")
	c.Assert(err, IsNil)
	c.Assert(o192_168.IsElement(e21), Equals, false)
}
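IsElement presumably performs a CIDR membership test like the standard library's net.IPNet.Contains; this sketch reproduces the 10/8 checks from the test above:

package main

import (
	"fmt"
	"net"
)

func main() {
	_, block, err := net.ParseCIDR("10.0.0.0/8")
	if err != nil {
		panic(err)
	}
	for _, s := range []string{"10.11.12.13", "9.10.11.12"} {
		// Contains reports whether the address falls inside 10.0.0.0/8,
		// matching the IsElement assertions above.
		fmt.Printf("%-12s in %s: %v\n", s, block, block.Contains(net.ParseIP(s)))
	}
}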
Example #6
/**
 * Do whatever is necessary to transition a Node to the running state;
 * in particular, open all acceptors.
 */
func (n *Node) OpenAcc() (err error) {

	n.mu.Lock()
	defer n.mu.Unlock()
	if !n.running {
		// XXX STUB
		n.running = true

		count := len(n.endPoints)
		if count > 0 {
			for i := 0; err == nil && i < count; i++ {
				var acc *xt.TcpAcceptor
				e := n.endPoints[i]
				// DEBUG
				//fmt.Printf("OpenAcc: endPoint %d is %s\n", i, e.String())
				// END
				if e.Transport() == "tcp" {
					// XXX HACK ON ADDRESS
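					// len("TcpEndPoint: ") == 13, so this slice strips the
					// type prefix from the endpoint's serialized form.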
					strAddr := e.String()[13:]
					unBound := strings.HasSuffix(strAddr, ":0")
					acc, err = xt.NewTcpAcceptor(strAddr)
					if err == nil && unBound {
						// DEBUG
						//fmt.Printf("BINDING endPoint %d\n", i)
						// END
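						// The offset 26 presumably strips a doubled prefix,
						// e.g. "TcpAcceptor: TcpEndPoint: ", recovering the
						// host:port actually bound by the kernel.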
						strAddr = acc.String()[26:]
						n.endPoints[i], err = xt.NewTcpEndPoint(strAddr)
					}
					// DEBUG
					//fmt.Printf("OpenAcc: acceptor %d is %s\n", i, acc.String())
					//fmt.Printf("OpenAcc: endPoint %d is %s\n",
					//	i, n.endPoints[i].String())
					// END
				}
				if err == nil {
					n.acceptors = append(n.acceptors, acc) // XXX ACCEPTORS
				}
			}
		}
	}
	return
}
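The fixed offsets above are brittle. Given the serialized form "TcpEndPoint: ADDR" seen elsewhere in these examples, strings.TrimPrefix expresses the same hack without magic numbers; a hypothetical reworking:

package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "TcpEndPoint: 127.0.0.1:4321"
	// Equivalent to s[13:], but self-documenting, and a no-op rather
	// than a corrupted address if the prefix ever changes shape.
	fmt.Println(strings.TrimPrefix(s, "TcpEndPoint: "))
}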
Example #7
func (s *XLSuite) makeANode(c *C) (badGuy *xn.Node, acc xt.AcceptorI) {
	rng := xr.MakeSimpleRNG()
	id := make([]byte, xu.SHA1_BIN_LEN)
	rng.NextBytes(id)
	nodeID, err := xi.NewNodeID(id)
	c.Assert(err, IsNil)
	name := rng.NextFileName(8)
	lfs := "tmp/" + hex.EncodeToString(id)
	badGuy, err = xn.NewNew(name, nodeID, lfs)
	c.Assert(err, IsNil)
	accCount := badGuy.SizeAcceptors()
	c.Assert(accCount, Equals, 0)
	ep, err := xt.NewTcpEndPoint("127.0.0.1:0")
	c.Assert(err, IsNil)
	ndx, err := badGuy.AddEndPoint(ep)
	c.Assert(err, IsNil)
	c.Assert(ndx, Equals, 0)
	acc = badGuy.GetAcceptor(0)
	return
}
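makeANode seeds the NodeID with SHA-1-length random bytes (xu.SHA1_BIN_LEN). A standard-library sketch of producing such an ID and the tmp/<hex> LFS path used above:

package main

import (
	"crypto/rand"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	// 20 random bytes, the length of a SHA-1 digest.
	id := make([]byte, sha1.Size)
	if _, err := rand.Read(id); err != nil {
		panic(err)
	}
	fmt.Println("lfs:", "tmp/"+hex.EncodeToString(id))
}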
Example #8
func (s *XLSuite) TestPeerSerialization(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("TEST_PEER_SERIALIZATION")
	}
	rng := xr.MakeSimpleRNG()

	// this is just a lazy way of building a peer
	name := rng.NextFileName(4)
	nid, err := makeNodeID(rng)
	c.Assert(err, Equals, nil)

	lfs := "tmp/" + hex.EncodeToString(nid.Value())
	node, err := NewNew(name, nid, lfs)
	c.Assert(err, Equals, nil)

	// harvest its keys
	ck := &node.ckPriv.PublicKey
	ckPEM, err := xc.RSAPubKeyToPEM(ck)
	c.Assert(err, Equals, nil)
	sk := &node.skPriv.PublicKey
	skPEM, err := xc.RSAPubKeyToPEM(sk)
	c.Assert(err, Equals, nil)

	// the other bits necessary
	port := 1024 + rng.Intn(1024)
	addr := fmt.Sprintf("1.2.3.4:%d", port)
	ep, err := xt.NewTcpEndPoint(addr)
	c.Assert(err, Equals, nil)
	ctor, err := xt.NewTcpConnector(ep)
	c.Assert(err, Equals, nil)
	overlay, err := xo.DefaultOverlay(ep)
	c.Assert(err, Equals, nil)
	oSlice := []xo.OverlayI{overlay}
	ctorSlice := []xt.ConnectorI{ctor}
	peer, err := NewPeer(name, nid, ck, sk, oSlice, ctorSlice)
	c.Assert(err, Equals, nil)
	c.Assert(peer, Not(Equals), nil)

	// build the expected serialization

	// BaseNode
	var bns []string
	s.addAString(&bns, "peer {")
	s.addAString(&bns, fmt.Sprintf("    name: %s", name))
	s.addAString(&bns, fmt.Sprintf("    nodeID: %s", nid.String()))
	s.addAString(&bns, fmt.Sprintf("    commsPubKey: %s", ckPEM))
	s.addAString(&bns, fmt.Sprintf("    sigPubKey: %s", skPEM))
	s.addAString(&bns, fmt.Sprintf("    overlays {"))
	for i := 0; i < len(oSlice); i++ {
		s.addAString(&bns, fmt.Sprintf("        %s", oSlice[i].String()))
	}
	s.addAString(&bns, fmt.Sprintf("    }"))

	// Specific to Peer
	s.addAString(&bns, fmt.Sprintf("    connectors {"))
	for i := 0; i < len(ctorSlice); i++ {
		s.addAString(&bns, fmt.Sprintf("        %s", ctorSlice[i].String()))
	}
	s.addAString(&bns, fmt.Sprintf("    }")) // closes connectors
	s.addAString(&bns, fmt.Sprintf("}"))     // closes peer
	myVersion := strings.Join(bns, "\n")

	serialized := peer.String()
	c.Assert(serialized, Equals, myVersion)

	backAgain, rest, err := ParsePeer(serialized)
	c.Assert(err, Equals, nil)
	c.Assert(len(rest), Equals, 0)
	reserialized := backAgain.String()
	c.Assert(reserialized, Equals, serialized)

}
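xc.RSAPubKeyToPEM presumably emits a standard PEM encoding of the public key; a sketch of the equivalent using crypto/x509 and encoding/pem:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// DER-encode the public key, then wrap it in a PEM block.
	der, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der})))
}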
Example #9
func (s *XLSuite) doTestCluster(c *C, rng *xr.PRNG, whichSHA int) {

	if VERBOSITY > 0 {
		fmt.Printf("TEST_CLUSTER whichSHA = %v\n", whichSHA)
	}

	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey

	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.

	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	for {
		if _, err = os.Stat(clusterPath); os.IsNotExist(err) {
			break
		}
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
	}
	err = xf.CheckLFS(clusterPath, 0750)
	c.Assert(err, IsNil)

	// DEBUG
	fmt.Printf("CLUSTER      %s\n", clusterName)
	fmt.Printf("CLUSTER_PATH %s\n", clusterPath)
	// END

	// Set the test size in various senses --------------------------
	// K1 is the number of servers, and so the cluster size.  K2 is
	// the number of clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(3 + rng.Intn(5)) // so 3..7
	K2 := uint32(2 + rng.Intn(4)) // so 2..5
	M := 16 + rng.Intn(16)        // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)

	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	<-an.DoneCh

	clusterID := an.ClusterID // a NodeID, not []byte
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil)
	clusterSize := an.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := an.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))

	// Create names and LFSs for the K1 members ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	memberNames := make([]string, K1)
	memberPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		var found bool
		memberNames[i] = rng.NextFileName(8)
		memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
		found, err = xf.PathExists(memberPaths[i])
		c.Assert(err, IsNil)
		for found {
			memberNames[i] = rng.NextFileName(8)
			memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
			found, err = xf.PathExists(memberPaths[i])
			c.Assert(err, IsNil)
		}
		// DEBUG
		fmt.Printf("MEMBER_PATH[%d]: %s\n", i, memberPaths[i])
		// END
		err = os.MkdirAll(memberPaths[i], 0750)
		c.Assert(err, IsNil)
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}

	// create K1 client nodes ---------------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep1, ep2 *xt.TcpEndPoint
		ep1, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		ep2, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep1, ep2}
		uc[i], err = reg.NewUserMember(memberNames[i], memberPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd, regServerCK, regServerSK,
			clusterName, an.ClusterAttrs, an.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
		c.Assert(uc[i].MemberMaker.DoneCh, NotNil)
	}
	// Start the K1 client nodes running ----------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}

	fmt.Println("ALL CLIENTS STARTED")

	// wait until all clientNodes are done --------------------------
	for i := uint32(0); i < K1; i++ {
		err = <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
		// nodeID := uc[i].clientID
	}
	fmt.Println("ALL CLIENTS DONE") // XXX NOT SEEN

	// verify that all clientNodes have meaningful baseNodes --------
	// XXX THESE TESTS ALWAYS FAIL
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, memberNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}
	// convert the client nodes to UpaxServers ----------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember()
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}
	// verify files are present and then start the servers ----------

	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at a 50 ms interval specified as a Run() argument and then sends
	// on DoneCh.  Second parameter is lifetime of the server in
	// keep-alives, say 20 (so 1 sec in total).  When this time has
	// passed, the server will send again on DoneCh, and then shut down.

	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}

	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("all servers have sent first DONE")
	// END

	// When all UpaxServers are ready, create K2 clients.--
	// Each client creates K3 separate datums of different
	// length (L1..L2) and content.  Each client signals
	// when done.

	// XXX STUB

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.

	// XXX STUB

	// After a reasonable deltaT, verify that all servers--
	// have a copy of each and every datum.

	// XXX STUB

	_, _, _, _ = K2, M, LMin, LMax
}
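The Run() contract described in the comment block inside doTestCluster above (signal once when up, tick through a fixed number of keep-alive intervals, then signal again and shut down) can be sketched with a ticker and a done channel; the names here are hypothetical:

package main

import (
	"fmt"
	"time"
)

// keepAlive signals on doneCh once when running, ticks through `lives`
// keep-alive intervals, then signals again and returns (shuts down).
func keepAlive(interval time.Duration, lives int, doneCh chan<- error) {
	doneCh <- nil // first DONE: up and running
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for i := 0; i < lives; i++ {
		<-ticker.C // one keep-alive cycle
	}
	doneCh <- nil // lifetime exhausted
}

func main() {
	doneCh := make(chan error, 1)
	go keepAlive(10*time.Millisecond, 20, doneCh)
	<-doneCh
	fmt.Println("server running")
	<-doneCh
	fmt.Println("server done")
}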
Example #10
func (s *XLSuite) TestPortlandRegCred(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("\nTEST_PORTLAND_REG_CRED")
	}
	rng := xr.MakeSimpleRNG()
	_ = rng

	// read our local copy of the reg cred
	rcData, err := ioutil.ReadFile("portlandRegCred.dat")
	c.Assert(err, IsNil)
	rc, err := ParseRegCred(string(rcData))
	c.Assert(err, IsNil)
	c.Assert(rc, NotNil)

	// DEBUG
	fmt.Println("portlandRegCred_test PORTLAND - A PUZZLE")
	// END

	// set up the client --------------------------------------------
	name := rng.NextFileName(8)
	lfs := path.Join("tmp", name)
	found, err := xf.PathExists(lfs)
	c.Assert(err, IsNil)
	for found {
		name = rng.NextFileName(8)
		lfs = path.Join("tmp", name)
		found, err = xf.PathExists(lfs)
		c.Assert(err, IsNil)
	}

	ep, err := xt.NewTcpEndPoint("127.0.0.1:0")
	c.Assert(err, IsNil)
	e := []xt.EndPointI{ep}
	c.Assert(e, NotNil)

	nodeID, err := xi.New(nil)
	c.Assert(err, IsNil)

	node, err := xn.NewNew(name, nodeID, lfs)
	c.Assert(err, IsNil)

	// DEBUG
	fmt.Printf("Portland client is at %v; lfs is %s\n",
		e, lfs)
	// END

	// set up its relationship to the server ------------------------
	serverName := rc.Name
	serverID := rc.ID
	serverEnd := rc.EndPoints
	c.Assert(serverEnd, NotNil)
	c.Assert(len(serverEnd) > 0, Equals, true)
	c.Assert(serverEnd[0], NotNil)
	// XXX TOO RIGID
	c.Assert(serverEnd[0].String(), Equals, "TcpEndPoint: 54.186.197.123:56789")
	// END
	serverCK := rc.CommsPubKey
	serverSK := rc.SigPubKey

	sc, err := NewSoloMember(node, serverName, serverID, serverEnd[0],
		serverCK, serverSK, e)
	c.Assert(err, IsNil)
	c.Assert(sc, NotNil)

	// DEBUG
	fmt.Println("SoloMember CREATED")

	// END

	// 3. run the client
	sc.Start()
	err = <-sc.DoneCh
	c.Assert(err, IsNil)

	// 4.  verify that the client LFS exists and is correct ---------
	found, err = xf.PathExists(lfs)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)

	// 5.  shut down the client -------------------------------------
	sc.CloseAcc() // should close any acceptors

}
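The retry loops above hand-roll unique directory names under tmp/. In modern Go, os.MkdirTemp does the same atomically (assuming the tmp/ parent already exists):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Creates tmp/cluster-<random>; fails if tmp/ itself is missing.
	dir, err := os.MkdirTemp("tmp", "cluster-")
	if err != nil {
		panic(err)
	}
	fmt.Println("created", dir)
}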
Example #11
func (s *XLSuite) TestEphServer(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("\nTEST_EPH_SERVER")
	}

	rng := xr.MakeSimpleRNG()

	// 1.  create a new ephemeral server ----------------------------
	es, err := NewEphServer()
	c.Assert(err, IsNil)
	c.Assert(es, NotNil)

	server := es.Server

	c.Assert(&server.RegNode.ckPriv.PublicKey,
		DeepEquals, server.GetCommsPublicKey())
	serverName := server.GetName()
	serverID := server.GetNodeID()
	serverEnd := server.GetEndPoint(0)
	serverCK := server.GetCommsPublicKey()
	serverSK := server.GetSigPublicKey()
	c.Assert(serverEnd, NotNil)

	// start the ephemeral server -------------------------
	err = es.Start()
	c.Assert(err, IsNil)
	defer es.Stop() // stop the server, closing its acceptor

	// DEBUG
	fmt.Printf("TestEphServer: server acc %s\n", server.GetAcceptor().String())
	fmt.Printf("               serverEnd %s\n", server.GetEndPoint(0).String())
	// END

	// verify Bloom filter is running
	reg := es.Server.Registry
	c.Assert(reg, NotNil)
	regID := reg.GetNodeID()
	c.Assert(reg.IDCount(), Equals, uint(1)) // the registry's own ID
	found, err := reg.ContainsID(regID)
	c.Assert(found, Equals, true)
	c.Assert(reg.IDCount(), Equals, uint(1))

	// 2. create a random cluster name, size, scratch directory -----
	clusterName := rng.NextFileName(8)
	clusterDir := path.Join("tmp", clusterName)
	for {
		if _, err = os.Stat(clusterDir); os.IsNotExist(err) {
			break
		}
		clusterName = rng.NextFileName(8)
		clusterDir = path.Join("tmp", clusterName)
	}
	err = xf.CheckLFS(clusterDir, 0750)
	c.Assert(err, IsNil)

	// DEBUG
	fmt.Printf("CLUSTER NAME: %s\n", clusterName)
	// END
	clusterAttrs := uint64(rng.Int63())
	K := uint32(2 + rng.Intn(6)) // so the size is 2 .. 7

	// 3. create an AdminClient, use it to get the clusterID
	// DEBUG
	fmt.Printf("\neph_server_test: creating ADMIN client\n")
	// END

	an, err := NewAdminClient(serverName, serverID, serverEnd,
		serverCK, serverSK, clusterName, clusterAttrs, K, uint32(1), nil)
	c.Assert(err, IsNil)

	an.Start()
	err = <-an.DoneCh
	c.Assert(err, IsNil)

	anID := an.ClusterMember.Node.GetNodeID()

	// DEBUG
	fmt.Println("\nADMIN CLIENT GETS:")
	fmt.Printf("  regID     %s\n", regID.String())
	fmt.Printf("  anID      %s\n", anID.String())
	if an.ClusterID == nil {
		fmt.Printf("  ClusterID NIL\n")
	} else {
		fmt.Printf("  ClusterID %s\n", an.ClusterID.String())
	}
	// END

	c.Check(reg.IDCount(), Equals, uint(3))
	c.Assert(an.ClusterID, NotNil) // the purpose of the exercise
	c.Assert(an.EPCount, Equals, uint32(1))

	found, err = reg.ContainsID(regID)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)

	found, err = reg.ContainsID(anID)
	c.Assert(err, IsNil)
	c.Check(found, Equals, true)

	found, err = reg.ContainsID(an.ClusterID)
	c.Assert(err, IsNil)
	c.Check(found, Equals, true)

	c.Check(reg.IDCount(), Equals, uint(3)) // regID + anID + clusterID

	// 4. create K members ------------------------------------------

	// DEBUG
	fmt.Printf("\nCREATING %d MEMBERS\n", K)
	// END
	uc := make([]*UserMember, K)
	ucNames := make([]string, K)
	namesInUse := make(map[string]bool)
	epCount := uint32(2)
	for i := uint32(0); i < K; i++ {
		var endPoints []xt.EndPointI
		for j := uint32(0); j < epCount; j++ {
			var ep *xt.TcpEndPoint
			ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
			c.Assert(err, IsNil)
			endPoints = append(endPoints, ep)
		}
		newName := rng.NextFileName(8)
		_, ok := namesInUse[newName]
		for ok {
			newName = rng.NextFileName(8)
			_, ok = namesInUse[newName]
		}
		namesInUse[newName] = true
		ucNames[i] = newName // guaranteed to be LOCALLY unique
		lfs := path.Join(clusterDir, newName)
		uc[i], err = NewUserMember(ucNames[i], lfs,
			nil, nil, // private RSA keys are generated if nil
			serverName, serverID, serverEnd, serverCK, serverSK,
			clusterName, an.ClusterAttrs, an.ClusterID,
			K, epCount, endPoints)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
	}

	// 5. initialize the K members, each in a separate goroutine ----
	for i := uint32(0); i < K; i++ {
		uc[i].Start()
	}

	// wait until all members are initialized -----------------------
	for i := uint32(0); i < K; i++ {
		doneErr := <-uc[i].MemberMaker.DoneCh
		c.Assert(doneErr, IsNil)
		// among other things, the Persist makes the nodes start listening
		uc[i].MemberMaker.PersistClusterMember()
		nodeID := uc[i].MemberMaker.GetNodeID()
		c.Assert(nodeID, NotNil)
		found, err := reg.ContainsID(nodeID)
		c.Assert(err, IsNil)
		c.Check(found, Equals, true)
	}
	c.Assert(reg.IDCount(), Equals, uint(3+K)) // regID + anID + clusterID + K

	// 6. verify that the nodes are live ----------------------------
	for i := uint32(0); i < K; i++ {
		mn := uc[i].MemberMaker
		cm := mn.ClusterMember
		node := cm.Node
		mnEPCount := uint32(node.SizeEndPoints())
		c.Assert(mnEPCount, Equals, epCount)
		actualEPCount := uint32(mn.SizeEndPoints())
		c.Assert(actualEPCount, Equals, epCount)
		actualAccCount := uint32(mn.SizeAcceptors())
		c.Assert(actualAccCount, Equals, epCount)
		for j := uint32(0); j < epCount; j++ {
			nodeEP := cm.GetEndPoint(int(j)).String()
			nodeAcc := cm.GetAcceptor(int(j)).String()
			c.Assert(strings.HasSuffix(nodeEP, ":0"), Equals, false)
			c.Assert(strings.HasSuffix(nodeAcc, nodeEP), Equals, true)
			// DEBUG
			fmt.Printf("node %d: endPoint %d is %s\n",
				i, j, cm.GetEndPoint(int(j)).String())
			// END
		}

	}

	// verify that results are as expected --------------------------

	// XXX STUB XXX
}
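The registry's ID bookkeeping exercised above (IDCount, ContainsID) amounts to set membership. A map-based sketch of that contract, with the map standing in for whatever Bloom filter the registry actually uses (unlike a Bloom filter, a map never yields false positives):

package main

import "fmt"

// idSet tracks node/cluster IDs by their raw bytes.
type idSet map[string]bool

func (s idSet) Add(id []byte)           { s[string(id)] = true }
func (s idSet) Contains(id []byte) bool { return s[string(id)] }
func (s idSet) Count() int              { return len(s) }

func main() {
	ids := idSet{}
	regID := []byte{0xde, 0xad}
	ids.Add(regID)
	fmt.Println(ids.Count(), ids.Contains(regID)) // 1 true
}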
Example #12
func ParseRegCred(s string) (rc *RegCred, err error) {

	var (
		line    string
		parts   []string
		name    string
		nodeID  *xi.NodeID
		ck, sk  *rsa.PublicKey
		e       []xt.EndPointI
		version xu.DecimalVersion
	)
	ss := strings.Split(s, "\n")
	line, err = xc.NextNBLine(&ss)
	if (err == nil) && (line != "regCred {") {
		err = IllFormedRegCred
	}
	if err == nil {
		line, err = xc.NextNBLine(&ss)
		if err == nil {
			parts = strings.Split(line, ": ")
			if len(parts) == 2 && parts[0] == "Name" {
				name = strings.TrimLeft(parts[1], " \t")
			} else {
				err = IllFormedRegCred
			}
		}
	}
	if err == nil {
		var id []byte
		line, err = xc.NextNBLine(&ss)
		if err == nil {
			parts = strings.Split(line, ": ")
			if len(parts) == 2 && parts[0] == "ID" {
				id, err = hex.DecodeString(parts[1])
			} else {
				err = IllFormedRegCred
			}
		}
		if err == nil {
			nodeID, err = xi.New(id)
		}
	}
	if err == nil {
		line, err = xc.NextNBLine(&ss)
		if err == nil {
			parts = strings.Split(line, ": ")
			if len(parts) == 2 && parts[0] == "CommsPubKey" {
				ck, err = xc.RSAPubKeyFromDisk([]byte(parts[1]))
			} else {
				err = IllFormedRegCred
			}
		}
	}
	if err == nil {
		line, err = xc.NextNBLine(&ss)
		if err == nil {
			parts = strings.Split(line, ": ")
			if len(parts) == 2 && parts[0] == "SigPubKey" {
				sk, err = xc.RSAPubKeyFromDisk([]byte(parts[1]))
			} else {
				err = IllFormedRegCred
			}
		}
	}
	if err == nil {
		line, err = xc.NextNBLine(&ss)
		if err == nil {
			// collect EndPoints section; this should be turned into a
			// utility function
			if line == "EndPoints {" {
				for err == nil {
					line = strings.TrimSpace(ss[0]) // peek
					if line == "}" {
						break
					}
					line, err = xc.NextNBLine(&ss)
					if err == nil {
						line = strings.TrimSpace(line)
						parts := strings.Split(line, ": ")
						if len(parts) != 2 || parts[0] != "TcpEndPoint" {
							err = IllFormedRegCred
						} else {
							var ep xt.EndPointI
							ep, err = xt.NewTcpEndPoint(parts[1])
							if err == nil {
								e = append(e, ep)
							}
						}
					}
				}
				if err == nil {
					line, err = xc.NextNBLine(&ss)
					if (err == nil) && (line != "}") {
						err = MissingClosingBrace
					}
				}
			} else {
				err = MissingEndPointsSection
			}
		}
	}
	if err == nil {
		line, err = xc.NextNBLine(&ss)
		if err == nil {
			parts = strings.Split(line, ": ")
			if len(parts) == 2 && parts[0] == "Version" {
				version, err = xu.ParseDecimalVersion(parts[1])
			} else {
				err = IllFormedRegCred
			}
		}
	}
	if err == nil {
		rc = &RegCred{
			Name:        name,
			ID:          nodeID,
			CommsPubKey: ck,
			SigPubKey:   sk,
			EndPoints:   e,
			Version:     version,
		}
	}
	return
}
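ParseRegCred splits each line on ": " and insists on exactly two parts. strings.SplitN with a limit of 2 would tolerate values that themselves contain the separator; a small illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Plain Split would yield three parts here and the line would be
	// rejected; SplitN keeps everything after the first separator together.
	line := "Name: registry: primary"
	parts := strings.SplitN(line, ": ", 2)
	fmt.Printf("%q %q\n", parts[0], parts[1])
}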
Example #13
func MockLocalHostCluster(K int) (nodes []*Node, accs []*xt.TcpAcceptor) {

	rng := xr.MakeSimpleRNG()

	// Create K nodes, each with a NodeID, two RSA private keys (sig and
	// comms), and two RSA public keys.  Each node creates a TcpAcceptor
	// running on 127.0.0.1 and a random (= system-supplied) port.
	names := make([]string, K)
	nodeIDs := make([]*xi.NodeID, K)
	for i := 0; i < K; i++ {
		// TODO: MAKE NAMES UNIQUE
		names[i] = rng.NextFileName(4)
		val := make([]byte, xu.SHA1_BIN_LEN)
		rng.NextBytes(val)
		nodeIDs[i], _ = xi.NewNodeID(val)
	}
	nodes = make([]*Node, K)
	accs = make([]*xt.TcpAcceptor, K)
	accEndPoints := make([]*xt.TcpEndPoint, K)
	for i := 0; i < K; i++ {
		lfs := "tmp/" + hex.EncodeToString(nodeIDs[i].Value())
		nodes[i], _ = NewNew(names[i], nodeIDs[i], lfs)
	}
	// XXX We need this functionality in using code
	//	defer func() {
	//		for i := 0; i < K; i++ {
	//			if accs[i] != nil {
	//				accs[i].CloseAcc()
	//			}
	//		}
	//	}()

	// Collect the nodeID, public keys, and listening address from each
	// node.

	// all nodes on the same overlay
	ar, _ := xo.NewCIDRAddrRange("127.0.0.0/8")
	overlay, _ := xo.NewIPOverlay("XO", ar, "tcp", 1.0)

	// add an endpoint to each node
	for i := 0; i < K; i++ {
		ep, _ := xt.NewTcpEndPoint("127.0.0.1:0")
		nodes[i].AddEndPoint(ep)
		nodes[i].OpenAcc() // XXX POSSIBLE ERRORS IGNORED
		accs[i] = nodes[i].GetAcceptor(0).(*xt.TcpAcceptor)
		accEndPoints[i] = accs[i].GetEndPoint().(*xt.TcpEndPoint)
	}

	// These are public keys harvested from each node, so name them that way.
	ckPubs := make([]*rsa.PublicKey, K)
	skPubs := make([]*rsa.PublicKey, K)
	ctors := make([]*xt.TcpConnector, K)

	for i := 0; i < K; i++ {
		// we already have nodeIDs
		ckPubs[i] = nodes[i].GetCommsPublicKey()
		skPubs[i] = nodes[i].GetSigPublicKey()
		ctors[i], _ = xt.NewTcpConnector(accEndPoints[i])
	}

	overlaySlice := []xo.OverlayI{overlay}
	peers := make([]*Peer, K)
	for i := 0; i < K; i++ {
		ctorSlice := []xt.ConnectorI{ctors[i]}
		peers[i], _ = NewPeer(names[i], nodeIDs[i], ckPubs[i], skPubs[i],
			overlaySlice, ctorSlice)
	}

	// Use the information collected to configure each node.
	for i := 0; i < K; i++ {
		for j := 0; j < K; j++ {
			if i != j {
				nodes[i].AddPeer(peers[j])
			}
		}
	}
	return
}
Example #14
func (s *XLSuite) TestSoloMember(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("\nTEST_SOLO_CLIENT")
	}

	rng := xr.MakeSimpleRNG()

	// 1.  create a new ephemeral server ----------------------------
	es, err := NewEphServer()
	c.Assert(err, IsNil)
	c.Assert(es, NotNil)

	server := es.Server

	c.Assert(&server.RegNode.ckPriv.PublicKey,
		DeepEquals, server.GetCommsPublicKey())
	serverName := server.GetName()
	serverID := server.GetNodeID()
	serverEnd := server.GetEndPoint(0)
	serverCK := server.GetCommsPublicKey()
	serverSK := server.GetSigPublicKey()
	c.Assert(serverEnd, NotNil)

	// start the mock server ------------------------------
	err = es.Start()
	c.Assert(err, IsNil)

	// 2. create the solo client ------------------------------------
	name := rng.NextFileName(8)
	lfs := path.Join("tmp", name)
	found, err := xf.PathExists(lfs)
	c.Assert(err, IsNil)
	for found {
		name = rng.NextFileName(8)
		lfs = path.Join("tmp", name)
		found, err = xf.PathExists(lfs)
		c.Assert(err, IsNil)
	}

	ep, err := xt.NewTcpEndPoint("127.0.0.1:0")
	c.Assert(err, IsNil)
	e := []xt.EndPointI{ep}

	nodeID, err := xi.New(nil)
	c.Assert(err, IsNil)

	node, err := xn.NewNew(name, nodeID, lfs)
	c.Assert(err, IsNil)

	sc, err := NewSoloMember(node, serverName, serverID, serverEnd,
		serverCK, serverSK, e)
	c.Assert(err, IsNil)
	c.Assert(sc, NotNil)

	// 3. run the client
	sc.Start()
	err = <-sc.DoneCh
	c.Assert(err, IsNil)

	// 4.  verify that the client LFS exists and is correct ---------
	found, err = xf.PathExists(lfs)
	c.Assert(err, IsNil)
	c.Assert(found, Equals, true)

	// 5.  shut down the client -------------------------------------
	sc.CloseAcc() // should close any acceptors

	// 6.  stop the server, closing its acceptor --------------------
	es.Stop()

}
Example #15
func main() {
	var err error

	flag.Usage = Usage
	flag.Parse()

	// FIXUPS ///////////////////////////////////////////////////////

	if err != nil {
		fmt.Println("error processing NodeID: %s\n", err.Error())
		os.Exit(-1)
	}
	if *testing {
		if *name == DEFAULT_NAME || *name == "" {
			*name = "testReg"
		}
		if *lfs == DEFAULT_LFS || *lfs == "" {
			*lfs = "./myApp/xlReg"
		} else {
			*lfs = path.Join("tmp", *lfs)
		}
		if *address == DEFAULT_ADDR {
			*address = "127.0.0.1"
		}
		if *globalAddress == DEFAULT_GLOBAL_ADDR {
			*globalAddress = "127.0.0.1"
		}
		if *port == DEFAULT_PORT || *port == 0 {
			*port = TEST_DEFAULT_PORT
		}
	}
	var backingFile string
	if !*ephemeral {
		backingFile = path.Join(*lfs, "idFilter.dat")
	}
	addrAndPort := fmt.Sprintf("%s:%d", *address, *port)
	endPoint, err := xt.NewTcpEndPoint(addrAndPort)
	if err != nil {
		fmt.Printf("not a valid endPoint: %s\n", addrAndPort)
		Usage()
		os.Exit(-1)
	}
	globalAddrAndPort := fmt.Sprintf("%s:%d", *globalAddress, *port)
	globalEndPoint, err := xt.NewTcpEndPoint(globalAddrAndPort)
	if err != nil {
		fmt.Printf("not a valid endPoint: %s\n", globalAddrAndPort)
		Usage()
		os.Exit(-1)
	}

	// SANITY CHECKS ////////////////////////////////////////////////
	if err == nil {
		if *m < 2 {
			*m = 20
		}
		if *k < 2 {
			*k = 8
		}
		err = xf.CheckLFS(*lfs, 0700) // tries to create if it doesn't exist
		if err == nil {
			if *logFile != "" {
				*logFile = path.Join(*lfs, *logFile)
			}
		}
	}
	// DISPLAY STUFF ////////////////////////////////////////////////
	if *verbose || *justShow {
		fmt.Printf("address          = %v\n", *address)
		fmt.Printf("backingFile      = %v\n", backingFile)
		fmt.Printf("clearFilter      = %v\n", *clearFilter)
		fmt.Printf("endPoint         = %v\n", endPoint)
		fmt.Printf("ephemeral        = %v\n", *ephemeral)
		fmt.Printf("globalAddress    = %v\n", *globalAddress)
		fmt.Printf("globalEndPoint   = %v\n", *globalEndPoint)
		fmt.Printf("justShow         = %v\n", *justShow)
		fmt.Printf("k                = %d\n", *k)
		fmt.Printf("lfs              = %s\n", *lfs)
		fmt.Printf("logFile          = %s\n", *logFile)
		fmt.Printf("m                = %d\n", *m)
		fmt.Printf("name             = %s\n", *name)
		fmt.Printf("port             = %d\n", *port)
		fmt.Printf("testing          = %v\n", *testing)
		fmt.Printf("verbose          = %v\n", *verbose)
	}
	if *justShow {
		return
	}
	// SET UP OPTIONS ///////////////////////////////////////////////
	var (
		f      *os.File
		logger *log.Logger
		opt    reg.RegOptions
		rs     *reg.RegServer
	)
	if *logFile != "" {
		f, err = os.OpenFile(*logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
		if err == nil {
			logger = log.New(f, "", log.Ldate|log.Ltime)
		}
	}
	if f != nil {
		defer f.Close()
	}
	if err == nil {
		opt.Address = *address
		opt.BackingFile = backingFile
		opt.ClearFilter = *clearFilter
		opt.Ephemeral = *ephemeral
		opt.GlobalEndPoint = globalEndPoint
		opt.K = uint(*k)
		opt.Lfs = *lfs
		opt.Logger = logger
		opt.M = uint(*m)
		opt.Port = fmt.Sprintf("%d", *port)
		opt.T = *t
		opt.Testing = *testing
		opt.Verbose = *verbose

		rs, err = setup(&opt)
		if err == nil {
			err = serve(rs)
		}
	}
}
Example #16
// This was copied from cluster_test.go and minimal changes have been
// made.
//
func (s *XLSuite) doTestPair(c *C, rng *xr.PRNG, whichSHA int) {

	if VERBOSITY > 0 {
		fmt.Printf("TEST_PAIR whichSHA = %v\n", whichSHA)
	}

	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey

	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.

	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	found, err := xf.PathExists(clusterPath)
	c.Assert(err, IsNil)
	for found {
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
		found, err = xf.PathExists(clusterPath)
		c.Assert(err, IsNil)
	}

	// Set the test size in various senses --------------------------
	// K1 is the number of upax servers, and so the cluster size.  K2 is
	// the number of upax clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(2)
	K2 := 1
	M := 16 + rng.Intn(16) // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)

	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	cn := &an.MemberMaker
	<-cn.DoneCh
	clusterID := cn.ClusterID
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil) // FAILS 2016-11-13
	clusterSize := cn.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := cn.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))

	// DEBUG
	// fmt.Printf("cluster %s: %s\n", clusterName, clusterID.String())
	// END

	// Create names and LFSs for the K1 servers ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	serverNames := make([]string, K1)
	serverPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		serverNames[i] = rng.NextFileName(8)
		serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
		found, err = xf.PathExists(serverPaths[i])
		c.Assert(err, IsNil)
		for found {
			serverNames[i] = rng.NextFileName(8)
			serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
			found, err = xf.PathExists(serverPaths[i])
			c.Assert(err, IsNil)
		}
		err = os.MkdirAll(serverPaths[i], 0750)
		c.Assert(err, IsNil)
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}

	// create K1 reg client nodes -----------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep *xt.TcpEndPoint
		ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep}
		uc[i], err = reg.NewUserMember(serverNames[i], serverPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd, regServerCK, regServerSK,
			clusterName, cn.ClusterAttrs, cn.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
	}
	// Start the K1 reg client nodes running ------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}

	// wait until all reg clientNodes are done ----------------------
	for i := uint32(0); i < K1; i++ {
		err := <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
	}

	// verify that all clientNodes have meaningful baseNodes --------
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, serverNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}

	// verify that all clientNode members have meaningful baseNodes -
	for i := uint32(0); i < K1; i++ {
		// fmt.Printf("  server %s\n", serverNames[i])	// DEBUG
		memberCount := uint32(len(uc[i].Members))
		c.Assert(memberCount, Equals, K1)
		for j := uint32(0); j < memberCount; j++ {
			c.Assert(uc[i].Members[j], NotNil)
			// DEBUG
			// fmt.Printf("    other server[%d] is %s\n", j, serverNames[j])
			// END

			// doesn't work because reg server does not necessarily see
			// members in serverName order.
			// c.Assert(uc[i].Members[j].GetName(), Equals, serverNames[j])
			c.Assert(uc[i].Members[j].Peer.GetName() == "", Equals, false)
			c.Assert(uc[i].Members[j].Peer.GetNodeID(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetCommsPublicKey(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetSigPublicKey(), NotNil)
		}
	}

	// convert the reg client nodes to UpaxServers ------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember() // sometimes panics
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}
	// verify files are present and then start the servers ----------

	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at a 50 ms interval specified as a Run() argument and then sends
	// on DoneCh.  Second parameter is lifetime of the server in
	// keep-alives, say 20 (so 1 sec in total).  When this time has
	// passed, the server will send again on DoneCh, and then shut down.

	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}

	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("pair_test: both servers have sent first DONE")
	// END

	// When all UpaxServers are ready, create K2 clients.--
	// Each upax client creates K3 separate datums of different
	// length (L1..L2) and content.  Each client signals
	// when done.

	// XXX STUB

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.

	// XXX STUB

	// After a reasonable deltaT, verify that both servers--
	// have a copy of each and every datum.

	// XXX STUB

	_, _, _, _ = K2, M, LMin, LMax
}
Example #17
	"path"
	"runtime"
	"time"
)

var _ = fmt.Print
var _ = xo.NewIPOverlay

const (
	MIN_LEN = 1024
	MAX_LEN = 2048
	Q       = 64 // "too many open files" if 64
)

var (
	ANY_END_POINT, _ = xt.NewTcpEndPoint("127.0.0.1:0")
)

// See cluster_test.go for a general description of these tests.
//
// This test involves nodes executing on a single machine, with acceptor
// IP addresses 127.0.0.1:P, where P represents a system-assigned unique
// port number.

// Accept connections from peers until a message is received on stopCh.
// For each message received from a peer, calculate its SHA1 hash,
// send that as a reply, and close the connection.  Send on stoppedCh
// when all replies have been sent.
func (s *XLSuite) nodeAsServer(c *C, node *Node, stopCh, stoppedCh chan bool) {
	acceptor := node.acceptors[0]
	go func() {
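A standard-library analogue of the accept/hash/reply loop described in the comment above, assuming plain TCP and that each peer half-closes its side after sending:

package main

import (
	"crypto/sha1"
	"fmt"
	"io"
	"net"
)

// serveOne reads a peer's whole message, replies with its SHA-1 hash,
// and closes the connection.
func serveOne(conn net.Conn) {
	defer conn.Close()
	msg, err := io.ReadAll(conn) // relies on the peer half-closing
	if err != nil {
		return
	}
	sum := sha1.Sum(msg)
	conn.Write(sum[:])
}

func main() {
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on", listener.Addr())
	for {
		conn, err := listener.Accept()
		if err != nil {
			return
		}
		go serveOne(conn)
	}
}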
Example #18
func (s *XLSuite) shouldCreateTcpEndPoint(c *C, addr string) *xt.TcpEndPoint {
	ep, err := xt.NewTcpEndPoint(addr)
	c.Assert(err, Equals, nil)
	c.Assert(ep, Not(Equals), nil)
	return ep
}
Example #19
func NewEphServer() (ms *EphServer, err error) {

	// Create an XLattice node with quasi-random parameters including
	// low-quality keys and an endPoint in 127.0.0.1, localhost.
	var (
		ckPriv, skPriv *rsa.PrivateKey
		rn             *RegNode
		ep             *xt.TcpEndPoint
		node           *xn.Node
		reg            *Registry
		server         *RegServer
	)

	rng := xr.MakeSimpleRNG()
	name := rng.NextFileName(16)
	idBuf := make([]byte, xu.SHA1_BIN_LEN)
	rng.NextBytes(idBuf)
	lfs := "tmp/" + hex.EncodeToString(idBuf)
	id, err := xi.New(nil)
	if err == nil {
		// XXX cheap keys, too weak for any serious use
		ckPriv, err = rsa.GenerateKey(rand.Reader, 1024)
		if err == nil {
			skPriv, err = rsa.GenerateKey(rand.Reader, 1024)
		}
	}
	if err == nil {
		ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
		if err == nil {
			eps := []xt.EndPointI{ep}
			node, err = xn.New(name, id, lfs, ckPriv, skPriv, nil, eps, nil)
			if err == nil {
				err = node.OpenAcc() // so acceptors are now live
				if err == nil {
					rn, err = NewRegNode(node, ckPriv, skPriv)
					if err == nil {
						// DEBUG
						if rn == nil {
							fmt.Println("regNode is NIL!\n")
						} else {
							fmt.Printf("eph server listening on %s\n",
								rn.GetAcceptor(0).String())
						}
						// END
						// a registry with no clusters and no logger
						opt := &RegOptions{
							EndPoint:       ep, // not used
							Ephemeral:      true,
							GlobalEndPoint: node.GetEndPoint(0),
							Lfs:            lfs, // redundant (is in node's BaseNode)
							Logger:         nil,
							K:              DEFAULT_K,
							M:              DEFAULT_M,
						}
						reg, err = NewRegistry(nil, rn, opt)
						if err == nil {
							server, err = NewRegServer(reg, true, 1)
							if err == nil {
								ms = &EphServer{
									acc:    rn.GetAcceptor(0),
									Server: server,
								}
							}
						}
					}
				}
			}
		}
	}
	return
}
Example #20
func setup(opt *reg.RegOptions) (rs *reg.RegServer, err error) {
	// If LFS/.xlattice/reg.config exists, we load that.  Otherwise we
	// create a node.  In either case we force the node to listen on
	// the designated port

	var (
		e                []xt.EndPointI
		node             *xn.Node
		pathToConfigFile string
		rn               *reg.RegNode
		ckPriv, skPriv   *rsa.PrivateKey
	)
	logger := opt.Logger
	verbose := opt.Verbose

	greetings := fmt.Sprintf("xlReg v%s %s start run\n",
		reg.VERSION, reg.VERSION_DATE)
	if verbose {
		fmt.Print(greetings)
	}
	logger.Print(greetings)

	pathToConfigFile = path.Join(path.Join(opt.Lfs, ".xlattice"), "reg.config")
	found, err := xf.PathExists(pathToConfigFile)
	if err == nil {
		if found {
			logger.Printf("Loading existing reg config from %s\n",
				pathToConfigFile)
			// The registry node already exists.  Parse it and we are done.
			var data []byte
			data, err = ioutil.ReadFile(pathToConfigFile)
			if err == nil {
				rn, _, err = reg.ParseRegNode(string(data))
			}
		} else {
			logger.Println("No config file found, creating new registry.")
			// We need to create a registry node from scratch.
			nodeID, _ := xi.New(nil)
			var ep xt.EndPointI
			ep, err = xt.NewTcpEndPoint(opt.Address + ":" + opt.Port)
			if err == nil {
				e = []xt.EndPointI{ep}
				ckPriv, err = rsa.GenerateKey(rand.Reader, 2048)
				if err == nil {
					skPriv, err = rsa.GenerateKey(rand.Reader, 2048)
				}
				if err == nil {
					node, err = xn.New("xlReg", nodeID, opt.Lfs, ckPriv, skPriv,
						nil, e, nil)
					if err == nil {
						err = node.OpenAcc() // XXX needs a complementary close
						if err == nil {
							// DEBUG
							fmt.Printf("XLattice node successfully created\n")
							fmt.Printf("  listening on %s\n", ep.String())
							// END
							rn, err = reg.NewRegNode(node, ckPriv, skPriv)
							if err == nil {
								// DEBUG
								fmt.Printf("regNode successfully created\n")
								// END
								err = xf.MkdirsToFile(pathToConfigFile, 0700)
								if err == nil {
									err = ioutil.WriteFile(pathToConfigFile,
										[]byte(rn.String()), 0400)
									// DEBUG
								} else {
									fmt.Printf("error writing config file: %v\n",
										err.Error())
								}
								// END --------------

								// DEBUG
							} else {
								fmt.Printf("error creating regNode: %v\n",
									err.Error())
								// END
							}
						}
					}
				}
			}
		}
	}
	if err == nil {
		var r *reg.Registry
		r, err = reg.NewRegistry(nil, // nil = clusters so far
			rn, opt) // regNode, options
		if err == nil {
			logger.Printf("Registry name: %s\n", rn.GetName())
			logger.Printf("         ID:   %s\n", rn.GetNodeID().String())
		}
		if err == nil {
			var verbosity int
			if opt.Verbose {
				verbosity++
			}
			rs, err = reg.NewRegServer(r, opt.Testing, verbosity)
		}
	}
	if err != nil {
		logger.Printf("ERROR: %s\n", err.Error())
	}
	return
}
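The load-or-create flow at the top of setup() is a common pattern; a compact sketch with a hypothetical file name and default contents:

package main

import (
	"fmt"
	"os"
	"path"
)

// loadOrCreate parses an existing config if present, otherwise builds
// a fresh one and persists it for later runs.
func loadOrCreate(dir string) (cfg string, err error) {
	p := path.Join(dir, "reg.config")
	data, err := os.ReadFile(p)
	if err == nil {
		return string(data), nil // existing config wins
	}
	if !os.IsNotExist(err) {
		return "", err
	}
	cfg = "name: xlReg\n" // hypothetical default contents
	err = os.WriteFile(p, []byte(cfg), 0400)
	return cfg, err
}

func main() {
	dir, err := os.MkdirTemp("", "reg")
	if err != nil {
		panic(err)
	}
	cfg, err := loadOrCreate(dir)
	fmt.Printf("%q %v\n", cfg, err)
}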