Example #1
// This was copied from cluster_test.go and minimal changes have been
// made.
//
func (s *XLSuite) doTestPair(c *C, rng *xr.PRNG, whichSHA int) {

	if VERBOSITY > 0 {
		fmt.Printf("TEST_PAIR whichSHA = %v\n", whichSHA)
	}

	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey

	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.

	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	found, err := xf.PathExists(clusterPath)
	c.Assert(err, IsNil)
	for found {
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
		found, err = xf.PathExists(clusterPath)
		c.Assert(err, IsNil)
	}

	// Set the test size in various senses --------------------------
	// K1 is the number of upax servers, and so the cluster size.  K2 is
	// the number of upax clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(2)
	K2 := 1
	M := 16 + rng.Intn(16) // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)

	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	cn := &an.MemberMaker
	<-cn.DoneCh
	clusterID := cn.ClusterID
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil) // FAILS 2016-11-13
	clusterSize := cn.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := cn.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))

	// DEBUG
	// fmt.Printf("cluster %s: %s\n", clusterName, clusterID.String())
	// END

	// Create names and LFSs for the K1 servers ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	serverNames := make([]string, K1)
	serverPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		serverNames[i] = rng.NextFileName(8)
		serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
		found, err = xf.PathExists(serverPaths[i])
		c.Assert(err, IsNil)
		for found {
			serverNames[i] = rng.NextFileName(8)
			serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
			found, err = xf.PathExists(serverPaths[i])
			c.Assert(err, IsNil)
		}
		err = os.MkdirAll(serverPaths[i], 0750)
		c.Assert(err, IsNil)
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}

	// create K1 reg client nodes -----------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep *xt.TcpEndPoint
		ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep}
		uc[i], err = reg.NewUserMember(serverNames[i], serverPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd, regServerCK, regServerSK,
			clusterName, cn.ClusterAttrs, cn.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
	}
	// Start the K1 reg client nodes running ------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}

	// wait until all reg clientNodes are done ----------------------
	for i := uint32(0); i < K1; i++ {
		err := <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
	}

	// verify that all clientNodes have meaningful baseNodes --------
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, serverNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}

	// verify that all clientNode members have meaningful baseNodes -
	for i := uint32(0); i < K1; i++ {
		// fmt.Printf("  server %s\n", serverNames[i])	// DEBUG
		memberCount := uint32(len(uc[i].Members))
		c.Assert(memberCount, Equals, K1)
		for j := uint32(0); j < memberCount; j++ {
			c.Assert(uc[i].Members[j], NotNil)
			// DEBUG
			// fmt.Printf("    other server[%d] is %s\n", j, serverNames[j])
			// END

			// doesn't work because reg server does not necessarily see
			// members in serverName order.
			// c.Assert(uc[i].Members[j].GetName(), Equals, serverNames[j])
			c.Assert(uc[i].Members[j].Peer.GetName(), Not(Equals), "")
			c.Assert(uc[i].Members[j].Peer.GetNodeID(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetCommsPublicKey(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetSigPublicKey(), NotNil)
		}
	}

	// convert the reg client nodes to UpaxServers ------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember() // sometimes panics
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}
	// verify files are present and then start the servers ----------

	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at the interval given by Run()'s first argument and then sends
	// on DoneCh.  The second argument is the lifetime of the server
	// in keep-alives; once that many have passed, the server sends
	// again on DoneCh and then shuts down.  Here 20 keep-alives at
	// 10 ms give each server a 200 ms lifetime.

	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}

	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("pair_test: both servers have sent first DONE")
	// END

	// When all UpaxServers are ready, create K2 clients.--
	// Each upax client creates M separate datums of varying
	// length (LMin..LMax) and content.  Each client signals
	// when done.

	// XXX STUB
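
	// A hedged sketch of what this stub might do.  UpaxClient,
	// NewUpaxClient, and rng.NextBytes are assumed names, not
	// confirmed anywhere in this file:
	//
	// ucl := make([]*UpaxClient, K2)
	// datums := make([][][]byte, K2)
	// for i := 0; i < K2; i++ {
	//	datums[i] = make([][]byte, M)
	//	for j := 0; j < M; j++ {
	//		length := LMin + rng.Intn(LMax-LMin+1) // in LMin..LMax
	//		datums[i][j] = make([]byte, length)
	//		rng.NextBytes(datums[i][j]) // random content
	//	}
	//	ucl[i], err = NewUpaxClient(/* keys, server endpoint, ... */)
	//	c.Assert(err, IsNil)
	//	ucl[i].Start() // posts its M datums, then signals on DoneCh
	// }
	// for i := 0; i < K2; i++ {
	//	<-ucl[i].DoneCh
	// }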

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.

	// XXX STUB
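
	// Sketch of the Exists() check described above.  us[i].uDir and
	// its Exists signature are assumptions, as is the mapping of
	// whichSHA values to hash functions; chosen[i] would record
	// which server client i posted to:
	//
	// makeKey := func(data []byte) []byte {
	//	switch whichSHA {
	//	case 1: // assumed: SHA-1
	//		h := sha1.Sum(data)
	//		return h[:]
	//	case 2: // assumed: SHA-256
	//		h := sha256.Sum256(data)
	//		return h[:]
	//	default: // assumed: SHA3-256
	//		h := sha3.Sum256(data)
	//		return h[:]
	//	}
	// }
	// for i := 0; i < K2; i++ {
	//	for j := 0; j < M; j++ {
	//		found, err := us[chosen[i]].uDir.Exists(makeKey(datums[i][j]))
	//		c.Assert(err, IsNil)
	//		c.Assert(found, Equals, true)
	//	}
	// }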

	// After a reasonable deltaT, verify that both servers--
	// have a copy of each and every datum.

	// XXX STUB
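
	// Sketch: replication time is not specified anywhere in this
	// file, so deltaT below is an arbitrary guess.  The check is the
	// same Exists() loop as above, run against both servers rather
	// than only the chosen one:
	//
	// time.Sleep(250 * time.Millisecond) // "reasonable deltaT"
	// for i := uint32(0); i < K1; i++ {
	//	// repeat the Exists() loop above with us[i] in place
	//	// of us[chosen[...]]
	// }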

	_, _, _, _ = K2, M, LMin, LMax
}
Example #2
func (s *XLSuite) doTestCluster(c *C, rng *xr.PRNG, whichSHA int) {

	if VERBOSITY > 0 {
		fmt.Printf("TEST_CLUSTER whichSHA = %v\n", whichSHA)
	}

	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey

	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.

	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	for {
		if _, err = os.Stat(clusterPath); os.IsNotExist(err) {
			break
		}
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
	}
	err = xf.CheckLFS(clusterPath, 0750)
	c.Assert(err, IsNil)

	// DEBUG
	fmt.Printf("CLUSTER      %s\n", clusterName)
	fmt.Printf("CLUSTER_PATH %s\n", clusterPath)
	// END

	// Set the test size in various senses --------------------------
	// K1 is the number of servers, and so the cluster size.  K2 is
	// the number of clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(3 + rng.Intn(5)) // so 3..7
	K2 := uint32(2 + rng.Intn(4)) // so 2..5
	M := 16 + rng.Intn(16)        // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)

	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	<-an.DoneCh

	clusterID := an.ClusterID // a NodeID, not []byte
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil)
	clusterSize := an.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := an.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))

	// Create names and LFSs for the K1 members ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	memberNames := make([]string, K1)
	memberPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		var found bool
		memberNames[i] = rng.NextFileName(8)
		memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
		found, err = xf.PathExists(memberPaths[i])
		c.Assert(err, IsNil)
		for found {
			memberNames[i] = rng.NextFileName(8)
			memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
			found, err = xf.PathExists(memberPaths[i])
			c.Assert(err, IsNil)
		}
		// DEBUG
		fmt.Printf("MEMBER_PATH[%d]: %s\n", i, memberPaths[i])
		// END
		err = os.MkdirAll(memberPaths[i], 0750)
		c.Assert(err, IsNil)
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}

	// create K1 client nodes ---------------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep1, ep2 *xt.TcpEndPoint
		ep1, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		ep2, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep1, ep2}
		uc[i], err = reg.NewUserMember(memberNames[i], memberPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd, regServerCK, regServerSK,
			clusterName, an.ClusterAttrs, an.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
		c.Assert(uc[i].MemberMaker.DoneCh, NotNil)
	}
	// Start the K1 client nodes running ----------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}

	fmt.Println("ALL CLIENTS STARTED")

	// wait until all clientNodes are done --------------------------
	for i := uint32(0); i < K1; i++ {
		err = <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
		// nodeID := uc[i].clientID
	}
	fmt.Println("ALL CLIENTS DONE") // XXX NOT SEEN

	// verify that all clientNodes have meaningful baseNodes --------
	// XXX THESE TESTS ALWAYS FAIL
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, memberNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}
	// convert the client nodes to UpaxServers ----------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember()
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}
	// verify files are present and then start the servers ----------

	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at the interval given by Run()'s first argument and then sends
	// on DoneCh.  The second argument is the lifetime of the server
	// in keep-alives; once that many have passed, the server sends
	// again on DoneCh and then shuts down.  Here 20 keep-alives at
	// 10 ms give each server a 200 ms lifetime.

	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}

	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("all servers have sent first DONE")
	// END

	// When all UpaxServers are ready, create K2 clients.--
	// Each client creates M separate datums of varying
	// length (LMin..LMax) and content.  Each client signals
	// when done.

	// XXX STUB
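
	// A hedged sketch, following the pattern sketched in doTestPair
	// above; UpaxClient, NewUpaxClient, datums, and chosen are
	// assumptions, and note that K2 (2..5 here) is a uint32:
	//
	// ucl := make([]*UpaxClient, K2)
	// chosen := make([]uint32, K2)
	// for i := uint32(0); i < K2; i++ {
	//	chosen[i] = uint32(rng.Intn(int(K1))) // server to post to
	//	// generate M datums of length LMin..LMax as in doTestPair,
	//	// then create and start the client:
	//	ucl[i], err = NewUpaxClient(/* keys, us[chosen[i]] endpoint, ... */)
	//	c.Assert(err, IsNil)
	//	ucl[i].Start()
	// }
	// for i := uint32(0); i < K2; i++ {
	//	<-ucl[i].DoneCh
	// }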

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.

	// XXX STUB

	// After a reasonable deltaT, verify that all servers--
	// have a copy of each and every datum.

	// XXX STUB
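
	// Sketch of the final check: after an arbitrary deltaT, every
	// one of the K1 (3..7) servers should hold every datum.
	// uDir.Exists and makeKey are the same assumptions as in the
	// doTestPair sketches above:
	//
	// time.Sleep(500 * time.Millisecond) // deltaT: an arbitrary guess
	// for i := uint32(0); i < K1; i++ {
	//	for j := uint32(0); j < K2; j++ {
	//		for k := 0; k < M; k++ {
	//			found, err := us[i].uDir.Exists(makeKey(datums[j][k]))
	//			c.Assert(err, IsNil)
	//			c.Assert(found, Equals, true)
	//		}
	//	}
	// }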

	_, _, _, _ = K2, M, LMin, LMax
}