func (s *XLSuite) doTestLoadEntries(c *C, rng *xr.PRNG, whichSHA int) {
	K := 16 + rng.Intn(16)

	// create a unique name for a scratch file
	pathToFile := filepath.Join("tmp", rng.NextFileName(16))
	found, err := xf.PathExists(pathToFile)
	c.Assert(err, IsNil)
	for found {
		pathToFile = filepath.Join("tmp", rng.NextFileName(16))
		found, err = xf.PathExists(pathToFile)
		c.Assert(err, IsNil)
	}
	f, err := os.OpenFile(pathToFile, os.O_CREATE|os.O_WRONLY, 0600)
	c.Assert(err, IsNil)

	// create K entries, saving them in a slice while writing them
	// to disk
	var entries []*LogEntry
	for i := 0; i < K; i++ {
		t, key, nodeID, src, path := s.makeEntryData(c, rng, whichSHA)
		entry, err := NewLogEntry(t, key, nodeID, src, path)
		c.Assert(err, IsNil)
		strEntry := entry.String()
		entries = append(entries, entry)
		var count int
		count, err = f.WriteString(strEntry + "\n")
		c.Assert(err, IsNil)
		c.Assert(count, Equals, len(strEntry)+1)
	}
	f.Close()
	c.Assert(len(entries), Equals, K)

	// use UpaxServer.LoadEntries to load the stuff in the file.
	m, err := xi.NewNewIDMap()
	c.Assert(err, IsNil)
	count, err := loadEntries(pathToFile, m, whichSHA)
	c.Assert(err, IsNil)
	c.Assert(count, Equals, K) // K entries loaded.

	for i := 0; i < K; i++ {
		var entry, eInMap *LogEntry
		var whatever interface{}
		entry = entries[i]
		key := entry.key
		whatever, err = m.Find(key)
		c.Assert(err, IsNil)
		c.Assert(whatever, NotNil)
		eInMap = whatever.(*LogEntry)
		// XXX NEED LogEntry.Equal()
		c.Assert(bytes.Equal(key, eInMap.key), Equals, true)
	}
}
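// A minimal sketch of the xi.IDMap insert/find pattern that the test above
// relies on, using only calls that appear in this file (NewNewIDMap, Insert,
// Find).  exampleIDMapRoundTrip is a hypothetical helper, not part of the
// package API: entries are keyed by their content key, and Find returns an
// interface{} that must be type-asserted back to *LogEntry.
func exampleIDMapRoundTrip(entry *LogEntry) (found *LogEntry, err error) {
	m, err := xi.NewNewIDMap()
	if err == nil {
		err = m.Insert(entry.key, entry)
	}
	if err == nil {
		var whatever interface{}
		whatever, err = m.Find(entry.key)
		if err == nil && whatever != nil {
			found = whatever.(*LogEntry)
		}
	}
	return
}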
func NewMemCache(maxBytes uint64, maxItems uint) (mc *MemCache, err error) {
	idMap, err := xi.NewNewIDMap()
	if err == nil {
		mc = &MemCache{
			maxBytes: maxBytes,
			maxItems: maxItems,
			idMap:    idMap,
		}
	}
	return
}
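// A minimal usage sketch for the constructor above.  exampleNewMemCache is a
// hypothetical helper; the limits (64 MB, 1024 items) are illustrative values
// chosen here, not defaults taken from the package.
func exampleNewMemCache() (mc *MemCache, err error) {
	// cap the cache at 64 MB and 1024 items (illustrative values only)
	mc, err = NewMemCache(64*1024*1024, 1024)
	return
}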
func (s *XLSuite) TestClusterClusterIHaveMgr(c *C) {
	if VERBOSITY > 0 {
		fmt.Println("TEST_CLUSTER_IHAVE_MGR")
	}
	rng := xr.MakeSimpleRNG()

	iHaveCh := make(chan IHaveObj)
	entries, err := xi.NewNewIDMap()
	c.Assert(err, IsNil)
	outMsgCh := make(chan *UpaxClusterMsg, 16)
	stopCh := make(chan bool)

	K := 3 + rng.Intn(14)
	keys := make([][]byte, K)
	for i := 0; i < K; i++ {
		keys[i] = make([]byte, 32)
		rng.NextBytes(keys[i])
		if i < K/2 {
			err = entries.Insert(keys[i], &keys[i])
			c.Assert(err, IsNil)
		}
	}
	obj := IHaveObj{keys}

	mgr, err := NewClusterIHaveMgr(iHaveCh, entries, outMsgCh, stopCh)
	c.Assert(err, IsNil)
	go mgr.Run()

	mgr.iHaveCh <- obj

	var msgs []*UpaxClusterMsg
	done := false
	for !done {
		select {
		case msg := <-outMsgCh:
			msgs = append(msgs, msg)
		case <-time.After(time.Millisecond):
			done = true
		}
	}
	c.Assert(len(msgs), Equals, K-K/2)
	stopCh <- true
}
func NewUpaxServer(ckPriv, skPriv *rsa.PrivateKey, cm *xcl.ClusterMember,
	whichSHA int) (us *UpaxServer, err error) {

	var (
		count       int
		lfs         string   // path to local file system
		f           *os.File // file for debugging log
		pathToLog   string
		logger      *log.Logger
		uDir        u.UI
		pathToU     string
		entries     *xi.IDMap
		ftLogFile   *os.File
		pathToFTLog string // conventionally lfs/U/L
	)
	if ckPriv == nil || skPriv == nil {
		err = NilRSAKey
	} else if cm == nil {
		err = NilClusterMember
	}
	if err == nil {
		serverVersion, err = xu.ParseDecimalVersion(VERSION)
	}
	if err == nil {
		// Whatever created cm should have created the local file system
		// and written the node configuration to
		// LFS/.xlattice/cluster.member.config.  Make sure that exists
		// before proceeding.
		lfs = cm.GetLFS()

		// This should be passed in opt.Logger
		pathToLog = filepath.Join(lfs, "log")
		f, err = os.OpenFile(pathToLog,
			os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640)
		if err == nil {
			logger = log.New(f, "", log.Ldate|log.Ltime)
		}
		pathToCfg := filepath.Join(
			filepath.Join(lfs, ".xlattice"), "cluster.member.config")
		var found bool
		found, err = xf.PathExists(pathToCfg)
		if err == nil && !found {
			err = ClusterConfigNotFound
		}
	}
	if f != nil {
		defer f.Close()
	}
	if err == nil {
		// DEBUG
		fmt.Printf("creating directory tree in %s\n", lfs)
		// END
		pathToU = filepath.Join(lfs, "U")
		uDir, err = u.New(pathToU, u.DIR16x16, 0)
	}
	if err == nil {
		entries, err = xi.NewNewIDMap() // with default depth
	}
	if err == nil {
		var found bool
		pathToFTLog = filepath.Join(pathToU, "L")
		found, err = xf.PathExists(pathToFTLog)
		if err == nil {
			if found {
				fmt.Printf("ftLog file exists\n")
				count, err = loadEntries(pathToFTLog, entries, whichSHA)
				if err == nil {
					// reopen it 0600 for appending
					ftLogFile, err = os.OpenFile(pathToFTLog,
						os.O_WRONLY|os.O_APPEND, 0600)
				}
			} else {
				// open it for appending
				ftLogFile, err = os.OpenFile(pathToFTLog,
					os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
			}
		}
	}
	if err == nil {
		us = &UpaxServer{
			DoneCh:         make(chan bool, 2),
			PathToDebugLog: pathToLog,
			Logger:         logger,
			uDir:           uDir,
			entries:        entries,
			ftLogFile:      ftLogFile,
			pathToFTLog:    pathToFTLog,
			entryCount:     count,
			ckPriv:         ckPriv,
			skPriv:         skPriv,
			ClusterMember:  *cm,
		}
	}
	return
}
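// A minimal sketch of calling the constructor above, assuming the caller
// already holds a configured *xcl.ClusterMember.  exampleNewUpaxServer is a
// hypothetical wrapper; it adds nothing beyond documenting the preconditions
// NewUpaxServer enforces.
func exampleNewUpaxServer(ckPriv, skPriv *rsa.PrivateKey,
	cm *xcl.ClusterMember, whichSHA int) (us *UpaxServer, err error) {

	// cm.GetLFS() must already contain .xlattice/cluster.member.config;
	// NewUpaxServer opens lfs/log for debug logging, creates lfs/U, and
	// reloads any existing fault-tolerant log found at lfs/U/L.
	return NewUpaxServer(ckPriv, skPriv, cm, whichSHA)
}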
func ParseFromStrings(ss []string) (node *Node, rest []string, err error) {
	var line string
	var m *xi.IDMap

	bn, rest, err := ParseBNFromStrings(ss, "node")
	if err == nil {
		node = &Node{BaseNode: *bn}
		m, err = xi.NewNewIDMap()
		if err == nil {
			node.peerMap = m
		}
	}
	if err == nil {
		line, err = xc.NextNBLine(&rest)
	}
	if err == nil {
		parts := strings.Split(line, ": ")
		if parts[0] == "lfs" {
			node.lfs = strings.TrimSpace(parts[1])
		} else {
			fmt.Println("MISSING LFS")
			err = NotASerializedNode
		}
		var ckPriv, skPriv *rsa.PrivateKey
		if err == nil {
			// move some of this into ExpectRSAPrivateKey() !
			line, err = xc.NextNBLine(&rest)
			if err == nil {
				parts = strings.Split(line, ": ")
				if parts[0] == "ckPriv" && parts[1] == "-----BEGIN -----" {
					ckPriv, err = ExpectRSAPrivateKey(&rest)
					node.ckPriv = ckPriv
				} else {
					fmt.Println("MISSING OR ILL-FORMED COMMS_KEY")
					err = NotASerializedNode
				}
			}
		}
		if err == nil {
			// move some of this into ExpectRSAPrivateKey() !
			line, err = xc.NextNBLine(&rest)
			if err == nil {
				parts = strings.Split(line, ": ")
				if parts[0] == "skPriv" && parts[1] == "-----BEGIN -----" {
					skPriv, err = ExpectRSAPrivateKey(&rest)
					node.skPriv = skPriv
				} else {
					fmt.Println("MISSING OR ILL-FORMED SIG_KEY")
					err = NotASerializedNode
				}
			}
		}
		// endPoints
		if err == nil {
			line, err = xc.NextNBLine(&rest)
		}
		if err == nil {
			if line == "endPoints {" {
				for err == nil {
					line, err = xc.NextNBLine(&rest)
					if err != nil {
						break
					}
					if line == "}" {
						// prepend := []string{line}
						// rest = append(prepend, rest...)
						break
					}
					var ep xt.EndPointI
					ep, err = xt.ParseEndPoint(line)
					if err != nil {
						break
					}
					_, err = node.AddEndPoint(ep)
					if err != nil {
						break
					}
				}
			} else {
				fmt.Println("MISSING END_POINTS BLOCK")
				fmt.Printf(" EXPECTED 'endPoints {', GOT: '%s'\n", line)
				err = NotASerializedNode
			}
		}
		// peers
		if err == nil {
			line, err = xc.NextNBLine(&rest)
		}
		if err == nil {
			if line == "peers {" {
				for {
					line = strings.TrimSpace(rest[0])
					if line == "}" { // ZZZ
						break
					}
					var peer *Peer
					peer, rest, err = ParsePeerFromStrings(rest)
					if err != nil {
						break
					}
					_, err = node.AddPeer(peer)
					if err != nil {
						break
					}
				}
			} else {
				fmt.Println("MISSING PEERS BLOCK")
				fmt.Printf(" EXPECTED 'peers {', GOT: '%s'\n", line)
				err = NotASerializedNode
			}
			line, err = xc.NextNBLine(&rest) // discard the ZZZ
		}

		// gateways, but not yet
		// XXX STUB XXX

		// expect closing brace for node {
		// XXX we need an expect(&rest)
		line, err = xc.NextNBLine(&rest)
		if err == nil {
			if line != "}" {
				fmt.Printf("extra text at end of node declaration: '%s'\n", line)
			}
		}
	}
	if err != nil {
		node = nil
	}
	return
}
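// A minimal sketch of driving the parser above, assuming serialized holds a
// node serialization in the line-oriented form ParseFromStrings expects.
// exampleParseNode is a hypothetical helper, not part of the package API.
func exampleParseNode(serialized string) (node *Node, rest []string, err error) {
	// split into lines and hand them to the parser; rest holds any lines
	// left over after the node's closing brace
	ss := strings.Split(serialized, "\n")
	return ParseFromStrings(ss)
}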
// XXX Creating a Node with a list of live connections seems nonsensical.
func New(name string, id *xi.NodeID, lfs string,
	ckPriv, skPriv *rsa.PrivateKey,
	o []xo.OverlayI, e []xt.EndPointI, p []*Peer) (n *Node, err error) {

	// lfs should be a well-formed POSIX path; if the directory does
	// not exist we should create it.
	err = xf.CheckLFS(lfs, 0700)

	// The ckPriv is an RSA key used to encrypt short messages.
	if err == nil {
		if ckPriv == nil {
			ckPriv, err = rsa.GenerateKey(rand.Reader, 2048)
		}
		if err == nil {
			// The skPriv is an RSA key used to create digital signatures.
			if skPriv == nil {
				skPriv, err = rsa.GenerateKey(rand.Reader, 2048)
			}
		}
	}
	// The node communicates through its endpoints.  These are contained
	// in overlays.  If an endpoint in 127.0.0.0/8 is in the list of
	// endpoints, that overlay is automatically added to the list of
	// overlays with the name "localhost".  Other IPv4 endpoints are
	// assumed to be in 0.0.0.0/0 ("globalV4") unless there is another
	// containing overlay, except that endpoints in private address
	// space are treated differently.  Unless there is an overlay with a
	// containing address space, addresses in 10/8 are assigned to
	// "privateA", addresses in 172.16/12 are assigned to "privateB",
	// and any in 192.168/16 are assigned to "privateC".  All of these
	// overlays are automatically created unless there is a pre-existing
	// overlay whose address range is the same as one of these or is
	// contained within one of them.
	var (
		endPoints []xt.EndPointI
		acceptors []xt.AcceptorI // each must share its index with an endPoint
		overlays  []xo.OverlayI
		m         *xi.IDMap
		peers     []*Peer // an empty slice
	)
	if err == nil {
		m, err = xi.NewNewIDMap()
	}
	if err == nil {
		if p != nil {
			count := len(p)
			for i := 0; i < count; i++ {
				err = m.Insert(p[i].GetNodeID().Value(), &p[i])
				if err != nil {
					break
				}
				peers = append(peers, p[i])
			}
		}
	}
	if err == nil {
		commsPubKey := &(*ckPriv).PublicKey
		sigPubKey := &(*skPriv).PublicKey
		var baseNode *BaseNode
		baseNode, err = NewBaseNode(name, id, commsPubKey, sigPubKey, overlays)
		if err == nil {
			n = &Node{ckPriv: ckPriv,
				skPriv:    skPriv,
				acceptors: acceptors,
				endPoints: endPoints,
				peers:     peers,
				gateways:  nil,
				lfs:       lfs,
				peerMap:   m,
				BaseNode:  *baseNode}
			if err == nil {
				if o != nil {
					count := len(o)
					for i := 0; i < count; i++ {
						overlays = append(overlays, o[i])
					}
				}
				if e != nil {
					count := len(e)
					for i := 0; i < count; i++ {
						// _, err = addEndPoint(e[i], &endPoints, &acceptors, &overlays)
						_, err = n.AddEndPoint(e[i])
					}
				}
			}
		}
	}
	return
}
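// A minimal sketch of calling the constructor above, assuming the NodeID was
// built elsewhere.  exampleNewNode is a hypothetical helper and the LFS path
// is illustrative only: nil private keys ask New to generate 2048-bit RSA
// keys itself, and nil slices mean no overlays, endPoints, or peers yet.
func exampleNewNode(id *xi.NodeID) (n *Node, err error) {
	// overlays, endPoints, and peers can be added later via AddEndPoint
	// and AddPeer
	return New("example", id, "tmp/exampleNode", nil, nil, nil, nil, nil)
}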