func (s *XLSuite) doTestMDParser(c *C, rng *xr.PRNG, whichSHA int) { var tHash []byte switch whichSHA { case xu.USING_SHA1: tHash = make([]byte, xu.SHA1_BIN_LEN) case xu.USING_SHA2: tHash = make([]byte, xu.SHA2_BIN_LEN) case xu.USING_SHA3: tHash = make([]byte, xu.SHA3_BIN_LEN) // DEFAULT = ERROR } rng.NextBytes(tHash) // not really a hash, of course sHash := hex.EncodeToString(tHash) // string form of tHash withoutSlash := rng.NextFileName(8) dirName := withoutSlash + "/" length := rng.Intn(4) var rSpaces string for i := 0; i < length; i++ { rSpaces += " " // on the right } // TEST FIRST LINE PARSER ----------------------------- line := sHash + " " + dirName + rSpaces treeHash2, dirName2, err := ParseMerkleDocFirstLine(line) c.Assert(err, IsNil) c.Assert(bytes.Equal(treeHash2, tHash), Equals, true) // we retain the terminating slash in MerkleDoc first lines c.Assert(dirName2, Equals, dirName) }
func (s *XLSuite) doTestParser(c *C, rng *xr.PRNG) { name := s.getAName(rng) a := rng.Intn(256) b := rng.Intn(256) _c := rng.Intn(256) d := rng.Intn(256) bits := rng.Intn(33) aRange := fmt.Sprintf("%d.%d.%d.%d/%d", a, b, _c, d, bits) transport := "tcp" cost := float32(rng.Intn(300)) / 100.0 ar, err := NewCIDRAddrRange(aRange) c.Assert(err, IsNil) o, err := NewIPOverlay(name, ar, transport, cost) c.Assert(err, IsNil) c.Assert(o, Not(IsNil)) c.Assert(name, Equals, o.Name()) // XXX ADDR RANGE MISSING c.Assert(transport, Equals, o.Transport()) c.Assert(float32(cost), Equals, o.Cost()) text := o.String() // DEBUG // fmt.Printf("serialized overlay is %s\n", text) // END o2, err := Parse(text) c.Assert(err, IsNil) c.Assert(text, Equals, o2.String()) }
// Make a message (or reply) of up to 16 AES blocks in size and stuff // it with random bytes. Return the message with PKCS7-padded appended. // func (s *XLSuite) MakeAMsg(c *C, rng *xr.PRNG) ( msg []byte, msgLen int) { msgLen = 2 + rng.Intn(16*aes.BlockSize-2) msg = make([]byte, msgLen) rng.NextBytes(msg) return }
func (s *XLSuite) noDotsOrDashes(rng *xr.PRNG) string { var length int = 3 + rng.Intn(16) var name = rng.NextFileName(length) for len(name) < 3 || strings.ContainsAny(name, ".-") || strings.ContainsAny(name[0:1], "0123456789") { name = rng.NextFileName(length) } return name }
// Write K randomly generated log entries to a scratch file under
// tmp/, then use loadEntries to read them back into an ID map and
// verify that every entry written can be found again by its key.
func (s *XLSuite) doTestLoadEntries(c *C, rng *xr.PRNG, whichSHA int) {
	K := 16 + rng.Intn(16) // 16..31 entries

	// create a unique name for a scratch file
	pathToFile := filepath.Join("tmp", rng.NextFileName(16))
	found, err := xf.PathExists(pathToFile)
	c.Assert(err, IsNil)
	for found {
		pathToFile = filepath.Join("tmp", rng.NextFileName(16))
		found, err = xf.PathExists(pathToFile)
		c.Assert(err, IsNil)
	}
	f, err := os.OpenFile(pathToFile, os.O_CREATE|os.O_WRONLY, 0600)
	c.Assert(err, IsNil)

	// create K entries, saving them in a slice while writing them
	// to disk, one serialized entry per line
	var entries []*LogEntry
	for i := 0; i < K; i++ {
		t, key, nodeID, src, path := s.makeEntryData(c, rng, whichSHA)
		entry, err := NewLogEntry(t, key, nodeID, src, path)
		c.Assert(err, IsNil)
		strEntry := entry.String()
		entries = append(entries, entry)
		var count int
		count, err = f.WriteString(strEntry + "\n")
		c.Assert(err, IsNil)
		// the +1 accounts for the newline
		c.Assert(count, Equals, len(strEntry)+1)
	}
	f.Close()
	c.Assert(len(entries), Equals, K)

	// use UpaxServer.LoadEntries to load the stuff in the file.
	m, err := xi.NewNewIDMap()
	c.Assert(err, IsNil)
	count, err := loadEntries(pathToFile, m, whichSHA)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, K) // K entries loaded.

	// every written entry must be retrievable from the map by key
	for i := 0; i < K; i++ {
		var entry, eInMap *LogEntry
		var whatever interface{}
		entry = entries[i]
		key := entry.key
		whatever, err = m.Find(key)
		c.Assert(err, IsNil)
		c.Assert(whatever, NotNil)
		eInMap = whatever.(*LogEntry)
		// DEBUG
		// XXX NEED LogEntry.Equal()
		// END
		// NOTE(review): weak check -- only keys are compared, not the
		// full entries; needs LogEntry.Equal() as flagged above
		c.Assert(bytes.Equal(key, eInMap.key), Equals, true)
	}
}
func (s *XLSuite) doTestKeySelector64(c *C, rng *xr.PRNG, usingSHA1 bool, m uint) { var v uint // length of byte array if usingSHA1 { // v = uint(20) // bytes } else { v = uint(32) } b := make([]byte, v) // value being inserted into filter k := uint((v * 8) / m) // number of hash functions bitSel := make([]byte, k) wordSel := make([]uint, k) // 2^6 is 64, number of bits in a uint64 wordsInFilter := 1 << (m - uint(6)) for i := uint(0); i < k; i++ { bitSel[i] = byte(rng.Intn(64)) wordSel[i] = uint(rng.Intn(wordsInFilter)) } // concatenate the key selectors at the front s.setBitOffsets(c, &b, bitSel) // append the word selectors s.setWordOffsets(c, &b, wordSel, m, k) // create an m,k filter filter, err := NewBloomSHA(m, k) c.Assert(err, IsNil) // verify that the expected bits are NOT set for i := uint(0); i < k; i++ { filterWord := filter.Filter[wordSel[i]] bitSelector := uint64(1) << bitSel[i] bitVal := filterWord & bitSelector c.Assert(bitVal == 0, Equals, true) } // insert the value b filter.Insert(b) // verify that all of the expected bits are set for i := uint(0); i < k; i++ { filterWord := filter.Filter[wordSel[i]] bitSelector := uint64(1) << bitSel[i] bitVal := filterWord & bitSelector c.Assert(bitVal == 0, Equals, false) } }
// Returns a slice of zero or more MiscItems. The slice must not contain // any S-S sequences (which are indistinguishable from a single S. func (s *XLSuite) createMiscItems(rng *xr.PRNG) (items []*MiscItem) { count := rng.Intn(4) // so 0 to 3 inclusive lastWasS := false for i := 0; i < count; i++ { item := s.createMiscItem(true, rng) // true = S ok lastWasS = s.IsS(item.body[0]) for item._type == MISC_S && lastWasS { item = s.createMiscItem(!lastWasS, rng) lastWasS = s.IsS(item.body[0]) } lastWasS = item._type == MISC_S items = append(items, item) } return }
// Populate the K3 byte slices to be used for testing func (muc *MockUpaxClient) createData(rng *xr.PRNG, K3, L1, L2 int) ( err error) { muc.K3 = K3 muc.L1 = L1 muc.L2 = L2 muc.data = make([][]byte, K3) for i := 0; i < K3; i++ { length := L1 + rng.Intn(L2-L1+1) // so L1..L2 inclusive muc.data[i] = make([]byte, length) rng.NextBytes(muc.data[i]) } return }
// PARSER TESTS ===================================================== func (s *XLSuite) doTestParser(c *C, rng *xr.PRNG, whichSHA int) { var tHash []byte switch whichSHA { case xu.USING_SHA1: tHash = make([]byte, xu.SHA1_BIN_LEN) case xu.USING_SHA2: tHash = make([]byte, xu.SHA2_BIN_LEN) case xu.USING_SHA3: tHash = make([]byte, xu.SHA3_BIN_LEN) // XXX DEFAULT = ERROR } rng.NextBytes(tHash) // not really a hash, of course sHash := hex.EncodeToString(tHash) // string form of tHash dirName := rng.NextFileName(8) + "/" nameWithoutSlash := dirName[0 : len(dirName)-1] indent := rng.Intn(4) var lSpaces, rSpaces string for i := 0; i < indent; i++ { lSpaces += " " // on the left rSpaces += " " // on the right } // TEST FIRST LINE PARSER ----------------------------- line := lSpaces + sHash + " " + dirName + rSpaces indent2, treeHash2, dirName2, err := ParseFirstLine(line, " ") c.Assert(err, IsNil) c.Assert(indent2, Equals, indent) c.Assert(bytes.Equal(treeHash2, tHash), Equals, true) c.Assert(dirName2, Equals, nameWithoutSlash) // TEST OTHER LINE PARSER ----------------------------- yesIsDir := rng.NextBoolean() if yesIsDir { line = lSpaces + sHash + " " + dirName + rSpaces } else { line = lSpaces + sHash + " " + nameWithoutSlash + rSpaces } nodeDepth, nodeHash, nodeName, isDir, err := ParseOtherLine(line, " ") c.Assert(err, IsNil) c.Assert(nodeDepth, Equals, indent) c.Assert(bytes.Equal(nodeHash, tHash), Equals, true) c.Assert(nodeName, Equals, nameWithoutSlash) c.Assert(isDir, Equals, yesIsDir) }
// Return either spaces, or a dollar sign, or a random 'word', or a // newline, or nothing. func (s *XLSuite) moreBits(c *C, rng *xr.PRNG) (txt string) { start := rng.Intn(7) switch start { case 0: txt += " " case 1: txt += "$" case 2: txt += "\n" case 3: txt += rng.NextFileName(8) case 4: txt += rng.NextFileName(8) case 5: txt += rng.NextFileName(8) case 6: // nothing } return }
// Returns a slice of zero or more Attributes. Attribute names must be // unique within the slice. func (s *XLSuite) createAttrValPairs(rng *xr.PRNG) (pairs []*AttrValPair) { count := rng.Intn(4) // so 0 to 3 inclusive var byName = make(map[string]*AttrValPair) for i := 0; i < count; i++ { var pair *AttrValPair for { pair = s.createAttrValPair(rng) // attr names must be unique; values need not be name := pair.Attr if _, ok := byName[name]; ok { continue } else { // it's not in the map, so add it byName[name] = pair break } } pairs = append(pairs, pair) } return }
// Create a single randomly chosen MiscItem. If sOK it may be an S. In any // case it may be either a Comment or a PI. func (s *XLSuite) createMiscItem(sOK bool, rng *xr.PRNG) *MiscItem { var body []rune var t MiscType if sOK { t = MiscType(rng.Intn(int(MISC_S) + 1)) } else { t = MiscType(rng.Intn(int(MISC_S))) } switch t { case MISC_COMMENT: // The comment must not end with a dash for { body = []rune(rng.NextFileName(16)) // a quasi-random string, len < 16 text := string(body) if !strings.HasSuffix(text, "-") { break } } case MISC_PI: body = []rune(rng.NextFileName(16)) // a quasi-random string, len < 16 case MISC_S: var runes []rune count := 1 + rng.Intn(3) // 1 to 3 inclusive for i := 0; i < count; i++ { kind := rng.Intn(4) // 0 to 3 inclusive switch kind { case 0: runes = append(runes, '\t') case 1: runes = append(runes, '\n') case 2: runes = append(runes, '\r') case 3: runes = append(runes, ' ') } } body = runes } return &MiscItem{_type: t, body: body} }
// This was copied from cluster_test.go and minimal changes have been
// made.
//
// End-to-end test of a two-server Upax cluster: registers a fresh
// cluster with a running xlReg registry (read from regCred.dat),
// creates K1=2 server nodes with their own local file systems and
// cheap RSA keys, joins them through the registry, converts them to
// UpaxServers, and runs them until each signals DONE once.  The
// client/message phases (K2, M, LMin, LMax) are still stubbed out.
func (s *XLSuite) doTestPair(c *C, rng *xr.PRNG, whichSHA int) {
	if VERBOSITY > 0 {
		fmt.Printf("TEST_PAIR whichSHA = %v\n", whichSHA)
	}

	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey

	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.
	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	found, err := xf.PathExists(clusterPath)
	c.Assert(err, IsNil)
	for found {
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
		found, err = xf.PathExists(clusterPath)
		c.Assert(err, IsNil)
	}

	// Set the test size in various senses --------------------------
	// K1 is the number of upax servers, and so the cluster size.  K2 is
	// the number of upax clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(2)
	K2 := 1
	M := 16 + rng.Intn(16) // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)

	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	cn := &an.MemberMaker
	<-cn.DoneCh // block until the admin client has finished registering
	clusterID := cn.ClusterID
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil) // FAILS 2016-11-13
	clusterSize := cn.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := cn.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))
	// DEBUG
	// fmt.Printf("cluster %s: %s\n", clusterName, clusterID.String())
	// END

	// Create names and LFSs for the K1 servers ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	serverNames := make([]string, K1)
	serverPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		// retry until the server name/path is unused
		serverNames[i] = rng.NextFileName(8)
		serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
		found, err = xf.PathExists(serverPaths[i])
		c.Assert(err, IsNil)
		for found {
			serverNames[i] = rng.NextFileName(8)
			serverPaths[i] = filepath.Join(clusterPath, serverNames[i])
			found, err = xf.PathExists(serverPaths[i])
			c.Assert(err, IsNil)
		}
		err = os.MkdirAll(serverPaths[i], 0750)
		c.Assert(err, IsNil)
		// 1024-bit keys are weak but fast enough for tests
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}

	// create K1 reg client nodes -----------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep *xt.TcpEndPoint
		// port 0: let the OS pick a free port
		ep, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep}
		uc[i], err = reg.NewUserMember(serverNames[i], serverPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd,
			regServerCK, regServerSK,
			clusterName, cn.ClusterAttrs, cn.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
	}

	// Start the K1 reg client nodes running ------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}

	// wait until all reg clientNodes are done ----------------------
	for i := uint32(0); i < K1; i++ {
		err := <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
	}

	// verify that all clientNodes have meaningful baseNodes --------
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, serverNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}

	// verify that all clientNode members have meaningful baseNodes -
	for i := uint32(0); i < K1; i++ {
		// fmt.Printf("  server %s\n", serverNames[i]) // DEBUG
		memberCount := uint32(len(uc[i].Members))
		c.Assert(memberCount, Equals, K1)
		for j := uint32(0); j < memberCount; j++ {
			c.Assert(uc[i].Members[j], NotNil)
			// DEBUG
			// fmt.Printf("    other server[%d] is %s\n", j, serverNames[j])
			// END
			// doesn't work because reg server does not necessarily see
			// members in serverName order.
			// c.Assert(uc[i].Members[j].GetName(), Equals, serverNames[j])
			c.Assert(uc[i].Members[j].Peer.GetName() == "", Equals, false)
			c.Assert(uc[i].Members[j].Peer.GetNodeID(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetCommsPublicKey(), NotNil)
			c.Assert(uc[i].Members[j].Peer.GetSigPublicKey(), NotNil)
		}
	}

	// convert the reg client nodes to UpaxServers ------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember() // sometimes panics
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}

	// verify files are present and then start the servers ----------
	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at a 50 ms interval specified as a Run() argument and then sends
	// on DoneCh.  Second parameter is lifetime of the server in
	// keep-alives, say 20 (so 1 sec in total).  When this time has
	// passed, the server will send again on DoneCh, and then shut down.

	// XXX STUB

	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}

	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("pair_test: both servers have sent first DONE")
	// END

	// When all UpaxServers are ready, create K2 clients.--
	// Each upax client creates K3 separate datums of different
	// length (L1..L2) and content.  Each client signals
	// when done.

	// XXX STUB

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.

	// XXX STUB

	// After a reasonable deltaT, verify that both servers--
	// have a copy of each and every datum.

	// XXX STUB

	// silence "declared but not used" until the stubs above are filled in
	_, _, _, _ = K2, M, LMin, LMax
}
// End-to-end test of an Upax cluster of random size: registers a
// fresh cluster with a running xlReg registry (read from
// regCred.dat), creates K1 (3..7) member nodes each with its own
// local file system and cheap RSA keys, joins them through the
// registry, converts them to UpaxServers, and runs them until each
// signals DONE once.  The client/message phases (K2, M, LMin, LMax)
// are still stubbed out.
func (s *XLSuite) doTestCluster(c *C, rng *xr.PRNG, whichSHA int) {
	if VERBOSITY > 0 {
		fmt.Printf("TEST_CLUSTER whichSHA = %v\n", whichSHA)
	}

	// read regCred.dat to get keys etc for a registry --------------
	dat, err := ioutil.ReadFile("regCred.dat")
	c.Assert(err, IsNil)
	regCred, err := reg.ParseRegCred(string(dat))
	c.Assert(err, IsNil)
	regServerName := regCred.Name
	regServerID := regCred.ID
	regServerEnd := regCred.EndPoints[0]
	regServerCK := regCred.CommsPubKey
	regServerSK := regCred.SigPubKey

	// Devise a unique cluster name.  We rely on the convention -----
	// that in Upax tests, the local file system for Upax servers is
	// tmp/CLUSTER-NAME/SERVER-NAME.
	clusterName := rng.NextFileName(8)
	clusterPath := filepath.Join("tmp", clusterName)
	for {
		// loop until the path does not already exist
		if _, err = os.Stat(clusterPath); os.IsNotExist(err) {
			break
		}
		clusterName = rng.NextFileName(8)
		clusterPath = filepath.Join("tmp", clusterName)
	}
	err = xf.CheckLFS(clusterPath, 0750)
	c.Assert(err, IsNil)
	// DEBUG
	fmt.Printf("CLUSTER      %s\n", clusterName)
	fmt.Printf("CLUSTER_PATH %s\n", clusterPath)
	// END

	// Set the test size in various senses --------------------------
	// K1 is the number of servers, and so the cluster size.  K2 is
	// the number of clients, M the number of messages sent (items to
	// be added to the Upax store), LMin and LMax message lengths.
	K1 := uint32(3 + rng.Intn(5)) // so 3..7
	K2 := uint32(2 + rng.Intn(4)) // so 2..5
	M := 16 + rng.Intn(16)        // 16..31
	LMin := 64 + rng.Intn(64)
	LMax := 128 + rng.Intn(128)

	// Use an admin client to get a clusterID for this clusterName --
	const EP_COUNT = 2
	an, err := reg.NewAdminClient(regServerName, regServerID, regServerEnd,
		regServerCK, regServerSK, clusterName, uint64(0), K1, EP_COUNT, nil)
	c.Assert(err, IsNil)
	an.Start()
	<-an.DoneCh // block until the admin client has finished registering
	clusterID := an.ClusterID // a NodeID, not []byte
	if clusterID == nil {
		fmt.Println("NIL CLUSTER ID: is xlReg running??")
	}
	c.Assert(clusterID, NotNil)
	clusterSize := an.ClusterMaxSize
	c.Assert(clusterSize, Equals, uint32(K1))
	epCount := an.EPCount
	c.Assert(epCount, Equals, uint32(EP_COUNT))

	// Create names and LFSs for the K1 members ---------------------
	// We create a distinct tmp/clusterName/serverName for each
	// server as its local file system (LFS).
	memberNames := make([]string, K1)
	memberPaths := make([]string, K1)
	ckPriv := make([]*rsa.PrivateKey, K1)
	skPriv := make([]*rsa.PrivateKey, K1)
	for i := uint32(0); i < K1; i++ {
		var found bool
		// retry until the member name/path is unused
		memberNames[i] = rng.NextFileName(8)
		memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
		found, err = xf.PathExists(memberPaths[i])
		c.Assert(err, IsNil)
		for found {
			memberNames[i] = rng.NextFileName(8)
			memberPaths[i] = filepath.Join(clusterPath, memberNames[i])
			found, err = xf.PathExists(memberPaths[i])
			c.Assert(err, IsNil)
		}
		// DEBUG
		fmt.Printf("MEMBER_PATH[%d]: %s\n", i, memberPaths[i])
		// END
		err = os.MkdirAll(memberPaths[i], 0750)
		c.Assert(err, IsNil)
		// 1024-bit keys are weak but fast enough for tests
		ckPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(ckPriv[i], NotNil)
		skPriv[i], err = rsa.GenerateKey(rand.Reader, 1024) // cheap keys
		c.Assert(err, IsNil)
		c.Assert(skPriv[i], NotNil)
	}

	// create K1 client nodes ---------------------------------------
	uc := make([]*reg.UserMember, K1)
	for i := uint32(0); i < K1; i++ {
		var ep1, ep2 *xt.TcpEndPoint
		// port 0: let the OS pick free ports
		// NOTE(review): the error from the first call is only checked
		// after the second, so an ep1 failure could be masked
		ep1, err = xt.NewTcpEndPoint("127.0.0.1:0")
		ep2, err = xt.NewTcpEndPoint("127.0.0.1:0")
		c.Assert(err, IsNil)
		e := []xt.EndPointI{ep1, ep2}
		uc[i], err = reg.NewUserMember(memberNames[i], memberPaths[i],
			ckPriv[i], skPriv[i],
			regServerName, regServerID, regServerEnd,
			regServerCK, regServerSK,
			clusterName, an.ClusterAttrs, an.ClusterID,
			K1, EP_COUNT, e)
		c.Assert(err, IsNil)
		c.Assert(uc[i], NotNil)
		c.Assert(uc[i].ClusterID, NotNil)
		c.Assert(uc[i].MemberMaker.DoneCh, NotNil)
	}

	// Start the K1 client nodes running ----------------------------
	for i := uint32(0); i < K1; i++ {
		uc[i].Start()
	}
	fmt.Println("ALL CLIENTS STARTED")

	// wait until all clientNodes are done --------------------------
	for i := uint32(0); i < K1; i++ {
		err = <-uc[i].MemberMaker.DoneCh
		c.Assert(err, IsNil)
		// nodeID := uc[i].clientID
	}
	fmt.Println("ALL CLIENTS DONE") // XXX NOT SEEN

	// verify that all clientNodes have meaningful baseNodes --------
	// XXX THESE TESTS ALWAYS FAIL
	//for i := 0; i < K1; i++ {
	//	c.Assert(uc[i].GetName(), Equals, memberNames[i])
	//	c.Assert(uc[i].GetNodeID(), NotNil)
	//	c.Assert(uc[i].GetCommsPublicKey(), NotNil)
	//	c.Assert(uc[i].GetSigPublicKey(), NotNil)
	//}

	// convert the client nodes to UpaxServers ----------------------
	us := make([]*UpaxServer, K1)
	for i := uint32(0); i < K1; i++ {
		err = uc[i].PersistClusterMember()
		c.Assert(err, IsNil)
		us[i], err = NewUpaxServer(
			ckPriv[i], skPriv[i], &uc[i].ClusterMember, whichSHA)
		c.Assert(err, IsNil)
		c.Assert(us[i], NotNil)
	}

	// verify files are present and then start the servers ----------
	// 11-07 TODO, modified:
	// Run() causes each server to send ItsMe to all other servers;
	// as each gets its Ack, it starts the KeepAlive/Ack cycle running
	// at a 50 ms interval specified as a Run() argument and then sends
	// on DoneCh.  Second parameter is lifetime of the server in
	// keep-alives, say 20 (so 1 sec in total).  When this time has
	// passed, the server will send again on DoneCh, and then shut down.

	// XXX STUB

	for i := uint32(0); i < K1; i++ {
		err = us[i].Run(10*time.Millisecond, 20)
		c.Assert(err, IsNil)
	}

	// Verify servers are running -------------------------
	// 11-18: we wait for the first done from each server.
	//
	// XXX STUB
	for i := uint32(0); i < K1; i++ {
		<-us[i].DoneCh
	}
	// DEBUG
	fmt.Println("all servers have sent first DONE")
	// END

	// When all UpaxServers are ready, create K2 clients.--
	// Each client creates K3 separate datums of different
	// length (L1..L2) and content.  Each client signals
	// when done.

	// XXX STUB

	// Verify for each of the K2 clients ------------------
	// that its data is present on the selected server.  We
	// do this by an Exists() call on uDir for the server's
	// LFS/U for each item posted.

	// XXX STUB

	// After a reasonable deltaT, verify that all servers--
	// have a copy of each and every datum.

	// XXX STUB

	// silence "declared but not used" until the stubs above are filled in
	_, _, _, _ = K2, M, LMin, LMax
}