func (n *Node) setLFS(val string) (err error) {
    if val == "" {
        err = NilLFS
    } else {
        err = xf.CheckLFS(val, 0700)
    }
    if err == nil {
        n.lfs = val
    }
    return
}
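// Usage sketch for setLFS (in-package, since the method is unexported).
// An empty path fails fast with NilLFS; any other path is handed to
// xf.CheckLFS, which creates the directory with mode 0700 if it is
// missing. The path below is hypothetical.
func exampleSetLFS(n *Node) {
    if err := n.setLFS(""); err != nil {
        // err is NilLFS here: empty paths are always rejected
    }
    if err := n.setLFS("tmp/scratchNode"); err == nil {
        // n.lfs is now "tmp/scratchNode" and the directory exists
    }
}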
func NewMockUpaxClient(name, lfs string, members []*xcl.MemberInfo,
    primary uint) (mc *MockUpaxClient, err error) {

    var (
        ckPriv, skPriv *rsa.PrivateKey
        ep             []xt.EndPointI
        node           *xn.Node
        uc             *UpaxClient
    )
    // lfs should be a well-formed POSIX path; if the directory does
    // not exist, we create it.
    err = xf.CheckLFS(lfs, 0750)

    // Generate fresh RSA keys (ckPriv and skPriv are always nil at this
    // point): ckPriv is used to encrypt short messages, skPriv to
    // create digital signatures.
    if err == nil {
        ckPriv, err = rsa.GenerateKey(rand.Reader, 2048)
    }
    if err == nil {
        skPriv, err = rsa.GenerateKey(rand.Reader, 2048)
    }
    // The mock client uses a system-assigned endpoint.
    if err == nil {
        var endPoint *xt.TcpEndPoint
        endPoint, err = xt.NewTcpEndPoint("127.0.0.1:0")
        if err == nil {
            ep = []xt.EndPointI{endPoint}
        }
    }
    // Spin up an XLattice node.
    if err == nil {
        node, err = xn.New(name, nil, // nil: get a default NodeID
            lfs, ckPriv, skPriv, nil, ep, nil) // nil overlays, nil peers
    }
    if err == nil {
        uc, err = NewUpaxClient(ckPriv, skPriv, node, members, primary)
        if err == nil {
            mc = &MockUpaxClient{
                UpaxClient: *uc,
            }
        }
    }
    return
}
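// A minimal construction sketch for tests. The name, scratch path, and
// empty member list below are hypothetical; keys are generated
// internally and the endpoint is system-assigned, so no fixtures are
// needed.
func exampleMockClient() (*MockUpaxClient, error) {
    var members []*xcl.MemberInfo // normally obtained from the registry
    return NewMockUpaxClient("mockClient", "tmp/mockClient", members, 0)
}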
// Create the Node for this client and write the serialized ClusterMember
// to the conventional place in the file system.
func (mn *MemberMaker) PersistClusterMember() (err error) {

    // XXX check attrs, etc

    lfs := mn.ClusterMember.Node.GetLFS()
    pathToCfgDir := path.Join(lfs, ".xlattice")
    pathToCfgFile := path.Join(pathToCfgDir, "cluster.member.config")
    _, err = os.Stat(pathToCfgDir)
    if os.IsNotExist(err) {
        err = xf.CheckLFS(pathToCfgDir, 0740)
    }
    if err == nil {
        config := mn.ClusterMember.String()
        err = ioutil.WriteFile(pathToCfgFile, []byte(config), 0600)
    }
    return
}
// Create just the Node for this member and write it to the conventional
// place in the file system.
func (mm *MemberMaker) PersistNode() (err error) {

    // XXX check attrs, etc

    lfs := mm.ClusterMember.Node.GetLFS()
    pathToCfgDir := path.Join(lfs, ".xlattice")
    pathToCfgFile := path.Join(pathToCfgDir, "node.config")
    found, err := xf.PathExists(pathToCfgDir)
    if err == nil && !found {
        err = xf.CheckLFS(pathToCfgDir, 0750)
    }
    if err == nil {
        config := mm.Node.String()
        err = ioutil.WriteFile(pathToCfgFile, []byte(config), 0600)
    }
    return
}
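// Round-trip sketch: both Persist* methods above write under
// LFS/.xlattice, so a consumer can locate a member's serialized
// configuration knowing only its LFS. Whether "cluster.member.config"
// or "node.config" is present depends on which method was called; the
// parsing step is omitted because it depends on the deserializer used.
func readMemberConfig(lfs string) (string, error) {
    data, err := ioutil.ReadFile(
        path.Join(lfs, ".xlattice", "cluster.member.config"))
    if err != nil {
        return "", err
    }
    return string(data), nil
}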
// XXX Creating a Node with a list of live connections seems nonsensical.
func New(name string, id *xi.NodeID, lfs string,
    ckPriv, skPriv *rsa.PrivateKey,
    o []xo.OverlayI, e []xt.EndPointI, p []*Peer) (n *Node, err error) {

    // lfs should be a well-formed POSIX path; if the directory does
    // not exist, we create it.
    err = xf.CheckLFS(lfs, 0700)

    // ckPriv is an RSA key used to encrypt short messages.
    if err == nil {
        if ckPriv == nil {
            ckPriv, err = rsa.GenerateKey(rand.Reader, 2048)
        }
        if err == nil {
            // skPriv is an RSA key used to create digital signatures.
            if skPriv == nil {
                skPriv, err = rsa.GenerateKey(rand.Reader, 2048)
            }
        }
    }

    // The node communicates through its endpoints, which are contained
    // in overlays.  If an endpoint in 127.0.0.0/8 is in the list of
    // endpoints, an overlay named "localhost" is automatically added
    // to the list of overlays.  Other IPv4 endpoints are assumed to be
    // in 0.0.0.0/0 ("globalV4") unless there is another containing
    // overlay, except that endpoints in private address space are
    // treated differently.  Unless there is an overlay with a
    // containing address space, addresses in 10/8 are assigned to
    // "privateA", addresses in 172.16/12 to "privateB", and addresses
    // in 192.168/16 to "privateC".  All of these overlays are created
    // automatically unless a pre-existing overlay has the same address
    // range as one of these or is contained within one of them.
    var (
        endPoints []xt.EndPointI
        acceptors []xt.AcceptorI // each must share its index with an endPoint
        overlays  []xo.OverlayI
        m         *xi.IDMap
        peers     []*Peer // an empty slice
    )
    if err == nil {
        m, err = xi.NewNewIDMap()
    }
    if err == nil && p != nil {
        for i := 0; i < len(p); i++ {
            err = m.Insert(p[i].GetNodeID().Value(), &p[i])
            if err != nil {
                break
            }
            peers = append(peers, p[i])
        }
    }
    if err == nil {
        commsPubKey := &ckPriv.PublicKey
        sigPubKey := &skPriv.PublicKey
        var baseNode *BaseNode
        baseNode, err = NewBaseNode(name, id, commsPubKey, sigPubKey, overlays)
        if err == nil {
            n = &Node{ckPriv: ckPriv, skPriv: skPriv,
                acceptors: acceptors, endPoints: endPoints,
                peers: peers, gateways: nil, lfs: lfs,
                peerMap: m, BaseNode: *baseNode}
            if o != nil {
                overlays = append(overlays, o...)
            }
            for i := 0; i < len(e); i++ {
                _, err = n.AddEndPoint(e[i])
                if err != nil {
                    break // was missing: errors were silently overwritten
                }
            }
        }
    }
    return
}
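// The default overlay classification described in the comment above,
// restated as a self-contained sketch over the standard net package.
// This illustrates the rules only; it is not the code New actually
// runs, and real endpoints are matched against pre-existing overlays
// first.
func defaultOverlayName(ip net.IP) string {
    for _, r := range []struct{ cidr, name string }{
        {"127.0.0.0/8", "localhost"},
        {"10.0.0.0/8", "privateA"},
        {"172.16.0.0/12", "privateB"},
        {"192.168.0.0/16", "privateC"},
    } {
        _, block, _ := net.ParseCIDR(r.cidr)
        if block.Contains(ip) {
            return r.name
        }
    }
    return "globalV4" // everything else falls into 0.0.0.0/0
}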
func (s *XLSuite) doTestCluster(c *C, rng *xr.PRNG, whichSHA int) {

    if VERBOSITY > 0 {
        fmt.Printf("TEST_CLUSTER whichSHA = %v\n", whichSHA)
    }
    // Read regCred.dat to get keys etc for a registry ---------------
    dat, err := ioutil.ReadFile("regCred.dat")
    c.Assert(err, IsNil)
    regCred, err := reg.ParseRegCred(string(dat))
    c.Assert(err, IsNil)
    regServerName := regCred.Name
    regServerID := regCred.ID
    regServerEnd := regCred.EndPoints[0]
    regServerCK := regCred.CommsPubKey
    regServerSK := regCred.SigPubKey

    // Devise a unique cluster name.  We rely on the convention that ---
    // in Upax tests the local file system for Upax servers is
    // tmp/CLUSTER-NAME/SERVER-NAME.
    clusterName := rng.NextFileName(8)
    clusterPath := filepath.Join("tmp", clusterName)
    for {
        if _, err = os.Stat(clusterPath); os.IsNotExist(err) {
            break
        }
        clusterName = rng.NextFileName(8)
        clusterPath = filepath.Join("tmp", clusterName)
    }
    err = xf.CheckLFS(clusterPath, 0750)
    c.Assert(err, IsNil)
    // DEBUG
    fmt.Printf("CLUSTER      %s\n", clusterName)
    fmt.Printf("CLUSTER_PATH %s\n", clusterPath)
    // END

    // Set the test size in various senses ----------------------------
    // K1 is the number of servers, and so the cluster size.  K2 is
    // the number of clients, M the number of messages sent (items to
    // be added to the Upax store), LMin and LMax message lengths.
    K1 := uint32(3 + rng.Intn(5)) // so 3..7
    K2 := uint32(2 + rng.Intn(4)) // so 2..5
    M := 16 + rng.Intn(16)        // 16..31
    LMin := 64 + rng.Intn(64)
    LMax := 128 + rng.Intn(128)

    // Use an admin client to get a clusterID for this clusterName ----
    const EP_COUNT = 2
    an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
        regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
    c.Assert(err, IsNil)
    an.Start()
    <-an.DoneCh

    clusterID := an.ClusterID // a NodeID, not []byte
    if clusterID == nil {
        fmt.Println("NIL CLUSTER ID: is xlReg running??")
    }
    c.Assert(clusterID, NotNil)
    clusterSize := an.ClusterMaxSize
    c.Assert(clusterSize, Equals, K1)
    epCount := an.EPCount
    c.Assert(epCount, Equals, uint32(EP_COUNT))

    // Create names and LFSs for the K1 members -----------------------
    // We create a distinct tmp/clusterName/serverName for each server
    // as its local file system (LFS).
    memberNames := make([]string, K1)
    memberPaths := make([]string, K1)
    ckPriv := make([]*rsa.PrivateKey, K1)
    skPriv := make([]*rsa.PrivateKey, K1)
    for i := uint32(0); i < K1; i++ {
        var found bool
        memberNames[i] = rng.NextFileName(8)
        memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
        found, err = xf.PathExists(memberPaths[i])
        c.Assert(err, IsNil)
        for found {
            memberNames[i] = rng.NextFileName(8)
            memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
            found, err = xf.PathExists(memberPaths[i])
            c.Assert(err, IsNil)
        }
        // DEBUG
        fmt.Printf("MEMBER_PATH[%d]: %s\n", i, memberPaths[i])
        // END
        err = os.MkdirAll(memberPaths[i], 0750)
        c.Assert(err, IsNil)
        ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
        c.Assert(err, IsNil)
        c.Assert(ckPriv[i], NotNil)
        skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
        c.Assert(err, IsNil)
        c.Assert(skPriv[i], NotNil)
    }

    // Create K1 client nodes -----------------------------------------
    uc := make([]*reg.UserMember, K1)
    for i := uint32(0); i < K1; i++ {
        var ep1, ep2 *xt.TcpEndPoint
        ep1, err = xt.NewTcpEndPoint("127.0.0.1:0")
        c.Assert(err, IsNil) // was missing: ep1's error was overwritten
        ep2, err = xt.NewTcpEndPoint("127.0.0.1:0")
        c.Assert(err, IsNil)
        e := []xt.EndPointI{ep1, ep2}
        uc[i], err = reg.NewUserMember(memberNames[i], memberPaths[i],
            ckPriv[i], skPriv[i],
            regServerName, regServerID, regServerEnd,
            regServerCK, regServerSK,
            clusterName, an.ClusterAttrs, an.ClusterID,
            K1, EP_COUNT, e)
        c.Assert(err, IsNil)
        c.Assert(uc[i], NotNil)
        c.Assert(uc[i].ClusterID, NotNil)
        c.Assert(uc[i].MemberMaker.DoneCh, NotNil)
    }

    // Start the K1 client nodes running ------------------------------
    for i := uint32(0); i < K1; i++ {
        uc[i].Start()
    }
    fmt.Println("ALL CLIENTS STARTED")

    // Wait until all clientNodes are done ----------------------------
    for i := uint32(0); i < K1; i++ {
        err = <-uc[i].MemberMaker.DoneCh
        c.Assert(err, IsNil)
    }
    fmt.Println("ALL CLIENTS DONE") // XXX NOT SEEN

    // Verify that all clientNodes have meaningful baseNodes ----------
    // XXX THESE TESTS ALWAYS FAIL
    //for i := 0; i < K1; i++ {
    //    c.Assert(uc[i].GetName(), Equals, memberNames[i])
    //    c.Assert(uc[i].GetNodeID(), NotNil)
    //    c.Assert(uc[i].GetCommsPublicKey(), NotNil)
    //    c.Assert(uc[i].GetSigPublicKey(), NotNil)
    //}

    // Convert the client nodes to UpaxServers ------------------------
    us := make([]*UpaxServer, K1)
    for i := uint32(0); i < K1; i++ {
        err = uc[i].PersistClusterMember()
        c.Assert(err, IsNil)
        us[i], err = NewUpaxServer(
            ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
        c.Assert(err, IsNil)
        c.Assert(us[i], NotNil)
    }

    // Verify files are present and then start the servers ------------
    // 11-07 TODO, modified:
    // Run() causes each server to send ItsMe to all other servers; as
    // each gets its Ack, it starts the KeepAlive/Ack cycle running at
    // the interval specified as a Run() argument and then sends on
    // DoneCh.  The second parameter is the lifetime of the server in
    // keep-alives, say 20 (so 200 ms in total at a 10 ms interval).
    // When this time has passed, the server will send again on DoneCh
    // and then shut down.

    // XXX STUB
    for i := uint32(0); i < K1; i++ {
        err = us[i].Run(10*time.Millisecond, 20)
        c.Assert(err, IsNil)
    }

    // Verify servers are running --------------------------------------
    // 11-18: we wait for the first done from each server.
    //
    // XXX STUB
    for i := uint32(0); i < K1; i++ {
        <-us[i].DoneCh
    }
    // DEBUG
    fmt.Println("all servers have sent first DONE")
    // END

    // When all UpaxServers are ready, create K2 clients. --------------
    // Each client creates K3 separate datums of different length
    // (LMin..LMax) and content.  Each client signals when done.
    // XXX STUB

    // Verify for each of the K2 clients -------------------------------
    // that its data is present on the selected server.  We do this by
    // an Exists() call on uDir for the server's LFS/U for each item
    // posted.

    // XXX STUB

    // After a reasonable deltaT, verify that all servers --------------
    // have a copy of each and every datum.

    // XXX STUB

    _, _, _, _ = K2, M, LMin, LMax
}
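// A sketch of the missing shutdown wait, following the DoneCh protocol
// described in the 11-07 comment above: each server signals once when
// it is up (already consumed in the test) and, by that description,
// once more when its keep-alive lifetime expires. A hypothetical
// helper, not yet wired into the test.
func waitForServerShutdown(us []*UpaxServer) {
    for i := 0; i < len(us); i++ {
        <-us[i].DoneCh // second send: the server has shut down
    }
}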
func (s *XLSuite) TestEphServer(c *C) {

    if VERBOSITY > 0 {
        fmt.Println("\nTEST_EPH_SERVER")
    }
    rng := xr.MakeSimpleRNG()

    // 1. create a new ephemeral server --------------------------------
    es, err := NewEphServer()
    c.Assert(err, IsNil)
    c.Assert(es, NotNil)

    server := es.Server
    c.Assert(&server.RegNode.ckPriv.PublicKey, DeepEquals,
        server.GetCommsPublicKey())
    serverName := server.GetName()
    serverID := server.GetNodeID()
    serverEnd := server.GetEndPoint(0)
    serverCK := server.GetCommsPublicKey()
    serverSK := server.GetSigPublicKey()
    c.Assert(serverEnd, NotNil)

    // Start the ephemeral server --------------------------------------
    err = es.Start()
    c.Assert(err, IsNil)
    defer es.Stop() // stop the server, closing its acceptor

    // DEBUG
    fmt.Printf("TestEphServer: server acc %s\n",
        server.GetAcceptor().String())
    fmt.Printf("               serverEnd  %s\n",
        server.GetEndPoint(0).String())
    // END

    // Verify the Bloom filter is running.
    reg := es.Server.Registry
    c.Assert(reg, NotNil)
    regID := reg.GetNodeID()
    c.Assert(reg.IDCount(), Equals, uint(1)) // the registry's own ID
    found, err := reg.ContainsID(regID)
    c.Assert(err, IsNil) // was missing
    c.Assert(found, Equals, true)
    c.Assert(reg.IDCount(), Equals, uint(1))

    // 2. create a random cluster name, size, scratch directory --------
    clusterName := rng.NextFileName(8)
    clusterDir := path.Join("tmp", clusterName)
    for {
        if _, err = os.Stat(clusterDir); os.IsNotExist(err) {
            break
        }
        clusterName = rng.NextFileName(8)
        clusterDir = path.Join("tmp", clusterName)
    }
    err = xf.CheckLFS(clusterDir, 0750)
    c.Assert(err, IsNil)
    // DEBUG
    fmt.Printf("CLUSTER NAME: %s\n", clusterName)
    // END
    clusterAttrs := uint64(rng.Int63())
    K := uint32(2 + rng.Intn(6)) // so the size is 2..7

    // 3. create an AdminClient, use it to get the clusterID -----------
    // DEBUG
    fmt.Printf("\neph_server_test: creating ADMIN client\n")
    // END
    an, err := NewAdminClient(serverName, serverID, serverEnd,
        serverCK, serverSK, clusterName, clusterAttrs, K, uint32(1), nil)
    c.Assert(err, IsNil)
    an.Start()
    err = <-an.DoneCh
    c.Assert(err, IsNil)
    anID := an.ClusterMember.Node.GetNodeID()

    // DEBUG
    fmt.Println("\nADMIN CLIENT GETS:")
    fmt.Printf("    regID     %s\n", regID.String())
    fmt.Printf("    anID      %s\n", anID.String())
    if an.ClusterID == nil {
        fmt.Printf("    ClusterID NIL\n")
    } else {
        fmt.Printf("    ClusterID %s\n", an.ClusterID.String())
    }
    // END

    c.Check(reg.IDCount(), Equals, uint(3))
    c.Assert(an.ClusterID, NotNil) // the purpose of the exercise
    c.Assert(an.EPCount, Equals, uint32(1))

    found, err = reg.ContainsID(regID)
    c.Assert(err, IsNil)
    c.Assert(found, Equals, true)
    found, err = reg.ContainsID(anID)
    c.Assert(err, IsNil)
    c.Check(found, Equals, true)
    found, err = reg.ContainsID(an.ClusterID)
    c.Assert(err, IsNil)
    c.Check(found, Equals, true)
    c.Check(reg.IDCount(), Equals, uint(3)) // regID + anID + clusterID
    // 4. create K members ----------------------------------------------
    // DEBUG
    fmt.Printf("\nCREATING %d MEMBERS\n", K)
    // END
    uc := make([]*UserMember, K)
    ucNames := make([]string, K)
    namesInUse := make(map[string]bool)
    epCount := uint32(2)
    for i := uint32(0); i < K; i++ {
        var endPoints []xt.EndPointI
        for j := uint32(0); j < epCount; j++ {
            var ep *xt.TcpEndPoint
            ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
            c.Assert(err, IsNil)
            endPoints = append(endPoints, ep)
        }
        newName := rng.NextFileName(8)
        _, ok := namesInUse[newName]
        for ok {
            newName = rng.NextFileName(8)
            _, ok = namesInUse[newName]
        }
        namesInUse[newName] = true
        ucNames[i] = newName // guaranteed to be LOCALLY unique
        lfs := path.Join(clusterDir, newName)

        uc[i], err = NewUserMember(ucNames[i], lfs,
            nil, nil, // private RSA keys are generated if nil
            serverName, serverID, serverEnd, serverCK, serverSK,
            clusterName, an.ClusterAttrs, an.ClusterID,
            K, epCount, endPoints)
        c.Assert(err, IsNil)
        c.Assert(uc[i], NotNil)
        c.Assert(uc[i].ClusterID, NotNil)
    }

    // 5. initialize the K members, each in a separate goroutine --------
    for i := uint32(0); i < K; i++ {
        uc[i].Start()
    }

    // Wait until all members are initialized ---------------------------
    for i := uint32(0); i < K; i++ {
        doneErr := <-uc[i].MemberMaker.DoneCh
        c.Assert(doneErr, IsNil)
        // Among other things, the Persist makes the node start listening.
        err = uc[i].MemberMaker.PersistClusterMember()
        c.Assert(err, IsNil) // was missing: error was silently dropped
        nodeID := uc[i].MemberMaker.GetNodeID()
        c.Assert(nodeID, NotNil)
        found, err := reg.ContainsID(nodeID)
        c.Assert(err, IsNil)
        c.Check(found, Equals, true)
    }
    c.Assert(reg.IDCount(), Equals, uint(3+K)) // regID + anID + clusterID + K

    // 6. verify that the nodes are live --------------------------------
    for i := uint32(0); i < K; i++ {
        mn := uc[i].MemberMaker
        cm := mn.ClusterMember
        node := cm.Node
        mnEPCount := uint32(node.SizeEndPoints())
        c.Assert(mnEPCount, Equals, epCount)
        actualEPCount := uint32(mn.SizeEndPoints())
        c.Assert(actualEPCount, Equals, epCount)
        actualAccCount := uint32(mn.SizeAcceptors())
        c.Assert(actualAccCount, Equals, epCount)
        for j := uint32(0); j < epCount; j++ {
            nodeEP := cm.GetEndPoint(int(j)).String()
            nodeAcc := cm.GetAcceptor(int(j)).String()
            c.Assert(strings.HasSuffix(nodeEP, ":0"), Equals, false)
            c.Assert(strings.HasSuffix(nodeAcc, nodeEP), Equals, true)
            // DEBUG
            fmt.Printf("node %d: endPoint %d is %s\n", i, j, nodeEP)
            // END
        }
    }

    // Verify that results are as expected -------------------------------
    // XXX STUB XXX
}
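// The pick-an-unused-name loops in this test (and in doTestCluster)
// share one pattern; a helper like this would factor it out. A
// hypothetical sketch, not part of the package.
func uniqueName(rng *xr.PRNG, inUse map[string]bool) string {
    name := rng.NextFileName(8)
    for inUse[name] {
        name = rng.NextFileName(8)
    }
    inUse[name] = true
    return name
}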
func main() {
    var err error
    flag.Usage = Usage
    flag.Parse()

    // FIXUPS ///////////////////////////////////////////////////////////
    if err != nil {
        fmt.Printf("error processing NodeID: %s\n", err.Error())
        os.Exit(-1)
    }
    if *testing {
        if *name == DEFAULT_NAME || *name == "" {
            *name = "testReg"
        }
        if *lfs == DEFAULT_LFS || *lfs == "" {
            *lfs = "./myApp/xlReg"
        } else {
            *lfs = path.Join("tmp", *lfs)
        }
        if *address == DEFAULT_ADDR {
            *address = "127.0.0.1"
        }
        if *globalAddress == DEFAULT_GLOBAL_ADDR {
            *globalAddress = "127.0.0.1"
        }
        if *port == DEFAULT_PORT || *port == 0 {
            *port = TEST_DEFAULT_PORT
        }
    }
    var backingFile string
    if !*ephemeral {
        backingFile = path.Join(*lfs, "idFilter.dat")
    }
    addrAndPort := fmt.Sprintf("%s:%d", *address, *port)
    endPoint, err := xt.NewTcpEndPoint(addrAndPort)
    if err != nil {
        fmt.Printf("not a valid endPoint: %s\n", addrAndPort)
        Usage()
        os.Exit(-1)
    }
    globalAddrAndPort := fmt.Sprintf("%s:%d", *globalAddress, *port)
    globalEndPoint, err := xt.NewTcpEndPoint(globalAddrAndPort)
    if err != nil {
        fmt.Printf("not a valid endPoint: %s\n", globalAddrAndPort)
        Usage()
        os.Exit(-1)
    }

    // SANITY CHECKS ////////////////////////////////////////////////////
    if err == nil {
        if *m < 2 {
            *m = 20
        }
        if *k < 2 {
            *k = 8
        }
        err = xf.CheckLFS(*lfs, 0700) // tries to create if it doesn't exist
        if err == nil && *logFile != "" {
            *logFile = path.Join(*lfs, *logFile)
        }
    }

    // DISPLAY STUFF ////////////////////////////////////////////////////
    if *verbose || *justShow {
        fmt.Printf("address        = %v\n", *address)
        fmt.Printf("backingFile    = %v\n", backingFile)
        fmt.Printf("clearFilter    = %v\n", *clearFilter)
        fmt.Printf("endPoint       = %v\n", endPoint)
        fmt.Printf("ephemeral      = %v\n", *ephemeral)
        fmt.Printf("globalAddress  = %v\n", *globalAddress)
        fmt.Printf("globalEndPoint = %v\n", globalEndPoint)
        fmt.Printf("justShow       = %v\n", *justShow)
        fmt.Printf("k              = %d\n", *k)
        fmt.Printf("lfs            = %s\n", *lfs)
        fmt.Printf("logFile        = %s\n", *logFile)
        fmt.Printf("m              = %d\n", *m)
        fmt.Printf("name           = %s\n", *name)
        fmt.Printf("port           = %d\n", *port)
        fmt.Printf("testing        = %v\n", *testing)
        fmt.Printf("verbose        = %v\n", *verbose)
    }
    if *justShow {
        return
    }

    // SET UP OPTIONS ///////////////////////////////////////////////////
    var (
        f      *os.File
        logger *log.Logger
        opt    reg.RegOptions
        rs     *reg.RegServer
    )
    if *logFile != "" {
        f, err = os.OpenFile(*logFile,
            os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0660)
        if err == nil {
            logger = log.New(f, "", log.Ldate|log.Ltime)
        }
    }
    if f != nil {
        defer f.Close()
    }
    if err == nil {
        opt.Address = *address
        opt.BackingFile = backingFile
        opt.ClearFilter = *clearFilter
        opt.Ephemeral = *ephemeral
        opt.GlobalEndPoint = globalEndPoint
        opt.K = uint(*k)
        opt.Lfs = *lfs
        opt.Logger = logger
        opt.M = uint(*m)
        opt.Port = fmt.Sprintf("%d", *port)
        opt.T = *t
        opt.Testing = *testing
        opt.Verbose = *verbose

        rs, err = setup(&opt)
        if err == nil {
            err = serve(rs)
        }
    }
    _ = logger // NOT YET
    _ = err
}
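// The m and k defaults above (20 and 8) are Bloom filter parameters.
// Assuming m is the log2 of the filter size in bits and k the number of
// hash functions (an assumption; confirm against reg.RegOptions), the
// expected false-positive rate after n insertions is
// (1 - e^(-k*n/2^m))^k. A standalone sketch ("math" must be imported):
func bloomFalsePositiveRate(m, k uint, n float64) float64 {
    bits := math.Pow(2, float64(m)) // filter size in bits
    kf := float64(k)
    return math.Pow(1-math.Exp(-kf*n/bits), kf)
}

// For m=20, k=8, and a hypothetical n=100000 stored IDs this evaluates
// to roughly 0.007, i.e. under one false positive per hundred lookups.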