func parseDefaultConfig(l string) (uint32, error) {
	// If we have already seen a default config line and n has a value, we
	// return an error.
	if groupConfig.n != 0 {
		return 0, fmt.Errorf("Default config can only be defined once: %v", l)
	}
	l = strings.TrimSpace(l)
	conf := strings.Split(l, " ")
	// The "+ k" in (fp % n + k) is optional.
	if !(len(conf) == 5 || len(conf) == 3) || conf[0] != "fp" || conf[1] != "%" {
		return 0, fmt.Errorf("Default config format should be like: %v",
			"default: fp % n + k")
	}

	var err error
	var n uint64
	n, err = strconv.ParseUint(conf[2], 10, 32)
	x.Check(err)
	groupConfig.n = uint32(n)
	x.AssertTrue(groupConfig.n != 0)

	if len(conf) == 5 {
		if conf[3] != "+" {
			return 0, fmt.Errorf("Default config format should be like: %v",
				"default: fp % n + k")
		}
		n, err = strconv.ParseUint(conf[4], 10, 32)
		x.Check(err)
		groupConfig.k = uint32(n)
	}
	return groupConfig.k, nil
}
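// Illustration (not part of the original source): the two shapes of the
// default-config value that parseDefaultConfig accepts, assuming a fresh
// groupConfig each time.
//
//	parseDefaultConfig(" fp % 3 ")     // sets n=3, k=0
//	parseDefaultConfig(" fp % 3 + 2 ") // sets n=3, k=2
//
// Anything else, e.g. "fp / 3", yields the "Default config format" error.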
func toRDF(buf *bytes.Buffer, item kv) {
	pl := item.list
	for _, p := range pl.Postings {
		x.Check2(buf.WriteString(item.prefix))
		if p.Uid == math.MaxUint64 && !bytes.Equal(p.Value, nil) {
			// Value posting. Convert to the appropriate type and write the
			// text representation with its type annotation.
			typ := stype.ValueForType(stype.TypeID(p.ValType))
			x.Check(typ.UnmarshalBinary(p.Value))
			str, err := typ.MarshalText()
			x.Check(err)
			x.Check2(buf.WriteString(fmt.Sprintf("\"%s\"", str)))
			if p.ValType == uint32(stype.GeoID) {
				x.Check2(buf.WriteString("^^<geo:geojson> "))
			} else if p.ValType != uint32(stype.BytesID) {
				x.Check2(buf.WriteString(fmt.Sprintf("^^<xs:%s> ", typ.Type().Name)))
			}
			x.Check2(buf.WriteString(" .\n"))
			return
		}
		x.Check2(buf.WriteString(fmt.Sprintf("<_uid_:%#x> .\n", p.Uid)))
	}
}
func main() {
	flag.Parse()
	f, err := os.Open(*geoData)
	x.Check(err)
	gr, err := gzip.NewReader(f)
	x.Check(err)

	dec := json.NewDecoder(gr)
	countryToGeo := make(map[string]string)
	findFeatureArray(dec)
	for dec.More() {
		var f geojson.Feature
		if err := dec.Decode(&f); err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Println(f.Properties["NAME_LONG"])
		gg, err := geojson.Marshal(f.Geometry)
		if err != nil {
			fmt.Println(err)
			continue
		}
		ggg := strings.Replace(string(gg), "\"", "'", -1)
		if country, ok := f.Properties["NAME_LONG"].(string); ok {
			countryToGeo[country] = ggg
		}
	}
	gr.Close()
	f.Close()

	f, err = os.Open(*rdf)
	x.Check(err)
	gr, err = gzip.NewReader(f)
	x.Check(err)
	scanner := bufio.NewScanner(gr)
	out, err := os.Create("countryGeoData")
	x.Check(err)
	defer out.Close()

	count1, count2 := 0, 0
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "@en") {
			items := strings.Split(line, "\t")
			country := strings.Trim(strings.Split(items[2], "@")[0], "\"")
			fmt.Println(country)
			if geoD, ok := countryToGeo[country]; ok {
				count1++
				out.WriteString(fmt.Sprintf("%s <loc> \"%s\"^^<geo:geojson> .\n",
					items[0], geoD))
			} else {
				count2++
			}
		}
	}
	fmt.Println(count1, count2)
}
func makeRequest(mutation chan string, c *uint64, wg *sync.WaitGroup) {
	var counter uint64
	for m := range mutation {
		counter = atomic.AddUint64(c, 1)
		if counter%100 == 0 {
			fmt.Printf("Request: %v\n", counter)
		}
	RETRY:
		req, err := http.NewRequest("POST", *dgraph, strings.NewReader(body(m)))
		x.Check(err)
		res, err := hc.Do(req)
		if err != nil {
			fmt.Printf("Retrying req: %d. Error: %v\n", counter, err)
			time.Sleep(5 * time.Millisecond)
			goto RETRY
		}
		body, err := ioutil.ReadAll(res.Body)
		x.Check(err)
		x.Check(res.Body.Close())
		x.Check(json.Unmarshal(body, &r))
		if r.Code != "ErrorOk" {
			log.Fatalf("Error while performing mutation: %v, err: %v", m, r.Message)
		}
	}
	wg.Done()
}
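// The `r` used above is a package-level response value defined elsewhere.
// As a hedged sketch, its shape is assumed to be roughly the following,
// mirroring the r.Code and r.Message accesses (field names and JSON tags
// here are guesses, not the actual definition):
//
//	type response struct {
//		Code    string `json:"code"`
//		Message string `json:"message"`
//	}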
func makeRequests(mutation chan string, wg *sync.WaitGroup) {
	for m := range mutation {
		counter := atomic.AddUint64(&s.mutations, 1)
		if counter%100 == 0 {
			num := atomic.LoadUint64(&s.rdfs)
			dur := time.Since(s.start)
			rate := float64(num) / dur.Seconds()
			fmt.Printf("[Request: %6d] Total RDFs done: %8d RDFs per second: %7.0f\r",
				counter, num, rate)
		}
	RETRY:
		req, err := http.NewRequest("POST", *dgraph, strings.NewReader(body(m)))
		x.Check(err)
		res, err := hc.Do(req)
		if err != nil {
			fmt.Printf("Retrying req: %d. Error: %v\n", counter, err)
			time.Sleep(5 * time.Millisecond)
			goto RETRY
		}
		body, err := ioutil.ReadAll(res.Body)
		x.Check(err)
		x.Check(res.Body.Close())
		if err = json.Unmarshal(body, &r); err != nil {
			// Not doing x.Checkf(json.Unmarshal(...), "Response: %s", string(body))
			// directly, to ensure that we don't convert body from []byte to
			// string when there are no errors.
			x.Checkf(err, "HTTP Status: %s Response body: %s.",
				http.StatusText(res.StatusCode), string(body))
		}
		if r.Code != "ErrorOk" {
			log.Fatalf("Error while performing mutation: %v, err: %v", m, r.Message)
		}
	}
	wg.Done()
}
func convertToEdges(ctx context.Context, nquads []rdf.NQuad) (mutationResult, error) {
	var edges []*task.DirectedEdge
	var mr mutationResult
	newUids := make(map[string]uint64)

	// Collect all the UIDs that need to be assigned or resolved.
	for _, nq := range nquads {
		if strings.HasPrefix(nq.Subject, "_new_:") {
			newUids[nq.Subject] = 0
		} else if !strings.HasPrefix(nq.Subject, "_uid_:") {
			uid, err := rdf.GetUid(nq.Subject)
			x.Check(err)
			newUids[nq.Subject] = uid
		}

		if len(nq.ObjectId) > 0 {
			if strings.HasPrefix(nq.ObjectId, "_new_:") {
				newUids[nq.ObjectId] = 0
			} else if !strings.HasPrefix(nq.ObjectId, "_uid_:") {
				uid, err := rdf.GetUid(nq.ObjectId)
				x.Check(err)
				newUids[nq.ObjectId] = uid
			}
		}
	}

	if len(newUids) > 0 {
		if err := worker.AssignUidsOverNetwork(ctx, newUids); err != nil {
			x.TraceError(ctx, x.Wrapf(err, "Error while AssignUidsOverNetwork"))
			return mr, err
		}
	}

	for _, nq := range nquads {
		// Get edges from nquad using newUids.
		edge, err := nq.ToEdgeUsing(newUids)
		if err != nil {
			x.TraceError(ctx, x.Wrapf(err, "Error while converting to edge: %v", nq))
			return mr, err
		}
		edges = append(edges, edge)
	}

	resultUids := make(map[string]uint64)
	// Strip out the _new_: prefix from the keys.
	for k, v := range newUids {
		if strings.HasPrefix(k, "_new_:") {
			resultUids[k[6:]] = v
		}
	}

	mr = mutationResult{
		edges:   edges,
		newUids: resultUids,
	}
	return mr, nil
}
// ParseConfig parses a group config provided by reader.
func ParseConfig(r io.Reader) error {
	groupConfig = config{}
	scanner := bufio.NewScanner(r)
	// To keep track of the last group id seen across lines. If the group ids
	// are not sequential, we return an error.
	var curGroupId uint32
	// If we see other lines after the default config line, we return an error.
	// The default config should be specified as the last line, so that we can
	// accurately check that k in (fp % N + k) generates consecutive groups.
	seenDefault := false
	for scanner.Scan() {
		l := scanner.Text()
		// Skip empty lines and comments.
		if l == "" || strings.HasPrefix(l, "//") {
			continue
		}
		c := strings.Split(l, ":")
		if len(c) < 2 {
			return fmt.Errorf("Incorrect format for config line: %v", l)
		}
		if c[0] == "default" {
			seenDefault = true
			k, err := parseDefaultConfig(c[1])
			if err != nil {
				return err
			}
			if k == 0 {
				continue
			}
			if k > curGroupId {
				return fmt.Errorf("k in (fp mod N + k) should be <= the last group id %v.",
					curGroupId)
			}
		} else {
			// There shouldn't be a line after the default config line.
			if seenDefault {
				return fmt.Errorf("Default config should be specified as the last line. Found %v", l)
			}
			groupId, err := strconv.ParseUint(c[0], 10, 32)
			x.Check(err)
			if curGroupId != uint32(groupId) {
				return fmt.Errorf("Group ids should be sequential and should start from 0. "+
					"Found %v, should have been %v", groupId, curGroupId)
			}
			curGroupId++
			if err = parsePredicates(uint32(groupId), c[1]); err != nil {
				return err
			}
		}
	}
	x.Check(scanner.Err())
	return nil
}
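// A hedged example of a config file ParseConfig accepts (assumed contents,
// not taken from the repository). Group ids are sequential from 0, and the
// default line comes last so the k check against the last group id works:
//
//	// explicit group assignments
//	0: _uid_
//	1: type.object.name
//	// remaining predicates hash into groups 2 and 3
//	default: fp % 2 + 2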
func main() {
	flag.Parse()
	logrus.SetLevel(logrus.DebugLevel)
	var srcl, dstl []R
	f, bufReader := getReader(*src)
	var err error

	srcCount := 0
	var strBuf bytes.Buffer
	for {
		err = x.ReadLine(bufReader, &strBuf)
		if err != nil {
			break
		}
		srcCount++
		rnq, err := rdf.Parse(strBuf.String())
		x.Checkf(err, "Unable to parse line: [%v]", strBuf.String())
		srcl = append(srcl, convert(rnq))
	}
	if err != nil && err != io.EOF {
		err := x.Errorf("Error while reading file: %v", err)
		log.Fatalf("%+v", err)
	}
	x.Check(f.Close())
	fmt.Println("Source done")

	f, bufReader = getReader(*dst)
	dstCount := 0
	for {
		err = x.ReadLine(bufReader, &strBuf)
		if err != nil {
			break
		}
		dstCount++
		rnq, err := rdf.Parse(strBuf.String())
		x.Checkf(err, "Unable to parse line: [%v]", strBuf.String())
		dstl = append(dstl, convert(rnq))
	}
	if err != nil && err != io.EOF {
		err := x.Errorf("Error while reading file: %v", err)
		log.Fatalf("%+v", err)
	}
	x.Check(f.Close())

	fmt.Printf("Src: [%d] Dst: [%d]\n", srcCount, dstCount)
	sort.Sort(ByR(srcl))
	sort.Sort(ByR(dstl))
	fmt.Println("Comparing now")
	compare(srcl, dstl)
}
func checkFlagsAndInitDirs() {
	numCpus := *numcpu
	if len(*cpuprofile) > 0 {
		f, err := os.Create(*cpuprofile)
		x.Check(err)
		x.Check(pprof.StartCPUProfile(f))
	}

	prev := runtime.GOMAXPROCS(numCpus)
	log.Printf("num_cpu: %v. prev_maxprocs: %v. Set max procs to num cpus",
		numCpus, prev)
	// Create the parent directory for postings.
	x.Check(os.MkdirAll(*postingDir, 0700))
}
func main() {
	flag.Parse()
	f, err := os.Open(*file)
	x.Check(err)
	defer f.Close()
	gr, err := gzip.NewReader(f)
	x.Check(err)

	hc = http.Client{Timeout: time.Minute}
	mutation := make(chan string, 3*(*concurrent))

	var count uint64
	var wg sync.WaitGroup
	for i := 0; i < *concurrent; i++ {
		wg.Add(1)
		go makeRequest(mutation, &count, &wg)
	}

	var buf bytes.Buffer
	bufReader := bufio.NewReader(gr)
	num := 0
	var rdfCount uint64
	for {
		err = readLine(bufReader, &buf)
		if err != nil {
			break
		}
		buf.WriteRune('\n')
		rdfCount++
		num++
		// Send a batch once it has *numRdf lines.
		if num >= *numRdf {
			mutation <- buf.String()
			buf.Reset()
			num = 0
		}
	}
	if err != io.EOF {
		log.Fatalf("Error while reading file: %+v", err)
	}
	if buf.Len() > 0 {
		mutation <- buf.String()
	}
	close(mutation)
	wg.Wait()
	fmt.Println("Number of RDFs parsed: ", rdfCount)
	fmt.Println("Number of mutations run: ", count)
}
// stringHelper does a simple DFS to convert a FilterTree to a string.
func (t *FilterTree) stringHelper(buf *bytes.Buffer) {
	x.AssertTrue(t != nil)
	if t.Func != nil && len(t.Func.Name) > 0 {
		// Leaf node.
		_, err := buf.WriteRune('(')
		x.Check(err)
		_, err = buf.WriteString(t.Func.Name)
		x.Check(err)

		if len(t.Func.Attr) > 0 {
			// The attribute is written first, followed by the args.
			args := make([]string, len(t.Func.Args)+1)
			args[0] = t.Func.Attr
			copy(args[1:], t.Func.Args)

			for _, arg := range args {
				_, err = buf.WriteString(" \"")
				x.Check(err)
				_, err = buf.WriteString(arg)
				x.Check(err)
				_, err = buf.WriteRune('"')
				x.Check(err)
			}
		}
		_, err = buf.WriteRune(')')
		x.Check(err)
		return
	}

	// Non-leaf node.
	_, err := buf.WriteRune('(')
	x.Check(err)
	switch t.Op {
	case "&":
		_, err = buf.WriteString("AND")
	case "|":
		_, err = buf.WriteString("OR")
	default:
		err = x.Errorf("Unknown operator: %q", t.Op)
	}
	x.Check(err)

	for _, c := range t.Child {
		_, err = buf.WriteRune(' ')
		x.Check(err)
		c.stringHelper(buf)
	}
	_, err = buf.WriteRune(')')
	x.Check(err)
}
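// Illustrative output of stringHelper (function and attribute names are made
// up for the example): a tree with Op "&" over two leaves renders as
//
//	(AND (anyof "name" "hello world") (eq "count" "10"))
//
// i.e. each leaf is written as (funcname "attr" "arg" ...), and non-leaf
// nodes wrap their children in (AND ...) or (OR ...).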
func (p *poolsi) connect(addr string) {
	if addr == *myAddr {
		return
	}
	p.RLock()
	_, has := p.all[addr]
	p.RUnlock()
	if has {
		return
	}

	pool := newPool(addr, 5)
	query := new(Payload)
	query.Data = make([]byte, 10)
	x.Check2(rand.Read(query.Data))

	conn, err := pool.Get()
	x.Checkf(err, "Unable to connect")
	c := NewWorkerClient(conn)
	resp, err := c.Echo(context.Background(), query)
	x.Checkf(err, "Unable to Echo")
	x.AssertTrue(bytes.Equal(resp.Data, query.Data))
	x.Check(pool.Put(conn))
	fmt.Printf("Connection with %q successful.\n", addr)

	p.Lock()
	defer p.Unlock()
	if _, has = p.all[addr]; has {
		return
	}
	p.all[addr] = pool
}
func main() {
	rand.Seed(time.Now().UnixNano())
	x.Init()
	checkFlagsAndInitDirs()

	ps, err := store.NewStore(*postingDir)
	x.Checkf(err, "Error initializing postings store")
	defer ps.Close()

	if len(*schemaFile) > 0 {
		err = schema.Parse(*schemaFile)
		x.Checkf(err, "Error while loading schema: %s", *schemaFile)
	}
	// Posting will initialize the index, which requires the schema. Hence,
	// load the schema before calling posting.Init().
	posting.Init(ps)
	worker.Init(ps)
	x.Check(group.ParseGroupConfig(*conf))

	// Setup external communication.
	che := make(chan error, 1)
	go setupServer(che)
	go worker.StartRaftNodes(*walDir)

	if err := <-che; !strings.Contains(err.Error(), "use of closed network connection") {
		log.Fatal(err)
	}
}
// InitAndStartNode gets called after having at least one membership sync with the cluster.
func (n *node) InitAndStartNode(wal *raftwal.Wal) {
	restart, err := n.initFromWal(wal)
	x.Check(err)

	if restart {
		fmt.Printf("RESTARTING\n")
		n.raft = raft.RestartNode(n.cfg)
	} else {
		if groups().HasPeer(n.gid) {
			n.joinPeers()
			n.raft = raft.StartNode(n.cfg, nil)
		} else {
			peers := []raft.Peer{{ID: n.id}}
			n.raft = raft.StartNode(n.cfg, peers)
			// Trigger an election, so this node can become the leader of this
			// single-node cluster.
			n.canCampaign = true
		}
	}
	go n.processCommitCh()
	go n.Run()
	// TODO: Find a better way to snapshot, so we don't lose the membership
	// state information, which isn't persisted.
	// go n.snapshotPeriodically()
	go n.batchAndSendMessages()
}
func (w *grpcWorker) RaftMessage(ctx context.Context, query *Payload) (*Payload, error) {
	if ctx.Err() != nil {
		return &Payload{}, ctx.Err()
	}

	// query.Data holds a batch of messages, each framed as a 4-byte
	// little-endian size followed by a marshalled raftpb.Message of that size.
	for idx := 0; idx < len(query.Data); {
		sz := int(binary.LittleEndian.Uint32(query.Data[idx : idx+4]))
		idx += 4
		if idx+sz > len(query.Data) {
			return &Payload{}, x.Errorf(
				"Invalid query. Size specified: %v. Size of array: %v\n", sz, len(query.Data))
		}
		msg := raftpb.Message{}
		x.Check(msg.Unmarshal(query.Data[idx : idx+sz]))
		if msg.Type != raftpb.MsgHeartbeat && msg.Type != raftpb.MsgHeartbeatResp {
			fmt.Printf("RECEIVED: %v %v-->%v\n", msg.Type, msg.From, msg.To)
		}
		if err := w.applyMessage(ctx, msg); err != nil {
			return &Payload{}, err
		}
		idx += sz
	}
	return &Payload{}, nil
}
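// The sender is expected to use the same framing that RaftMessage decodes: a
// 4-byte little-endian length prefix followed by the marshalled message. A
// minimal sketch of such an encoder (a hypothetical helper, not the actual
// batchAndSendMessages implementation):
func packMessages(msgs []raftpb.Message) ([]byte, error) {
	var buf bytes.Buffer
	var szBuf [4]byte
	for _, m := range msgs {
		data, err := m.Marshal()
		if err != nil {
			return nil, err
		}
		// Length prefix, then payload.
		binary.LittleEndian.PutUint32(szBuf[:], uint32(len(data)))
		buf.Write(szBuf[:])
		buf.Write(data)
	}
	return buf.Bytes(), nil
}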
func (n *node) doSendMessage(to uint64, data []byte) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	addr := n.peers.Get(to)
	if len(addr) == 0 {
		return
	}
	pool := pools().get(addr)
	conn, err := pool.Get()
	x.Check(err)
	defer pool.Put(conn)

	c := NewWorkerClient(conn)
	p := &Payload{Data: data}

	ch := make(chan error, 1)
	go func() {
		_, err := c.RaftMessage(ctx, p)
		ch <- err
	}()

	select {
	case <-ctx.Done():
		return
	case <-ch:
		// We don't need to do anything if we receive an error while sending
		// a message. Raft would automatically retry.
		return
	}
}
func doFilterString() {
	r := expand([]uint64{15161013152876854722}, "film.director.film")
	r = expand(r, "film.film.directed_by")
	r = expand(r, "film.director.film")
	fmt.Printf("Without filter: %d\n", len(r))

	var numHits int
	for _, u := range r {
		name, found := gNames[u]
		if !found {
			continue
		}
		tmp, err := normalize([]byte(name))
		x.Check(err)
		name = string(tmp)

		tokens := strings.Split(name, " ")
		var found1, found2 bool
		for _, t := range tokens {
			if t == "the" {
				found1 = true
			} else if t == "a" {
				found2 = true
			}
		}
		if found1 || found2 {
			numHits++
		}
	}
	fmt.Printf("With filter: %d\n", numHits)
}
func writeToFile(fpath string, ch chan []byte) error {
	f, err := os.Create(fpath)
	if err != nil {
		return err
	}
	defer f.Close()

	w := bufio.NewWriterSize(f, 1000000)
	gw, err := gzip.NewWriterLevel(w, gzip.BestCompression)
	if err != nil {
		return err
	}

	for buf := range ch {
		if _, err := gw.Write(buf); err != nil {
			return err
		}
	}
	if err := gw.Flush(); err != nil {
		return err
	}
	if err := gw.Close(); err != nil {
		return err
	}
	return w.Flush()
}
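// Hedged usage sketch for writeToFile (file name and payloads invented for
// the example): the producer owns the channel and must close it, which lets
// writeToFile flush the gzip stream and return.
func writeToFileExample() {
	ch := make(chan []byte, 16)
	errc := make(chan error, 1)
	go func() {
		errc <- writeToFile("example.rdf.gz", ch)
	}()
	for i := 0; i < 3; i++ {
		ch <- []byte(fmt.Sprintf("<%d> <name> \"n%d\" .\n", i, i))
	}
	close(ch) // signal writeToFile to flush and finish
	x.Check(<-errc)
}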
func compare(srcl, dstl []R) {
	var buf bytes.Buffer
	var ps, ds, matches int
	for ps < len(srcl) && ds < len(dstl) {
		s := srcl[ps]
		d := dstl[ds]
		if equal(s, d) {
			matches++
			ps++
			ds++
		} else if less(s, d) {
			// s < d. Record s as present only in the source, and advance s.
			buf.WriteString(s.String())
			buf.WriteRune('\n')
			ps++
		} else {
			ds++
		}
	}
	fmt.Printf("Found matches: %v\n", matches)
	x.Check(ioutil.WriteFile(*out, buf.Bytes(), 0644))
}
func generateGroup(groupId uint32) (*task.GroupKeys, error) {
	it := pstore.NewIterator()
	defer it.Close()

	g := &task.GroupKeys{
		GroupId: groupId,
	}

	for it.SeekToFirst(); it.Valid(); it.Next() {
		k, v := it.Key(), it.Value()
		pk := x.Parse(k.Data())
		if pk == nil {
			continue
		}
		if group.BelongsTo(pk.Attr) != g.GroupId {
			// Skip ahead to the next predicate. it.Prev() counteracts the
			// it.Next() that the loop runs by default.
			it.Seek(pk.SkipPredicate())
			it.Prev()
			continue
		}

		var pl types.PostingList
		x.Check(pl.Unmarshal(v.Data()))

		kdup := make([]byte, len(k.Data()))
		copy(kdup, k.Data())
		key := &task.KC{
			Key:      kdup,
			Checksum: pl.Checksum,
		}
		g.Keys = append(g.Keys, key)
	}
	return g, it.Err()
}
func (n *node) processCommitCh() {
	pending := make(chan struct{}, numPendingMutations)

	for e := range n.commitCh {
		if e.Data == nil {
			continue
		}

		if e.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			x.Check(cc.Unmarshal(e.Data))
			if len(cc.Context) > 0 {
				var rc task.RaftContext
				x.Check(rc.Unmarshal(cc.Context))
				n.Connect(rc.Id, rc.Addr)
			}
			n.raft.ApplyConfChange(cc)
		} else {
			go n.process(e, pending)
		}
	}
}
// processFile sends mutations for a given gz file.
func processFile(file string) {
	fmt.Printf("\nProcessing %s\n", file)
	f, err := os.Open(file)
	x.Check(err)
	defer f.Close()
	gr, err := gzip.NewReader(f)
	x.Check(err)

	hc = http.Client{Timeout: time.Minute}
	mutation := make(chan string, 3*(*concurrent))

	var wg sync.WaitGroup
	for i := 0; i < *concurrent; i++ {
		wg.Add(1)
		go makeRequests(mutation, &wg)
	}

	var buf bytes.Buffer
	bufReader := bufio.NewReader(gr)
	num := 0
	for {
		err = readLine(bufReader, &buf)
		if err != nil {
			break
		}
		buf.WriteRune('\n')
		atomic.AddUint64(&s.rdfs, 1)
		num++
		if num >= *numRdf {
			mutation <- buf.String()
			buf.Reset()
			num = 0
		}
	}
	if err != io.EOF {
		x.Checkf(err, "Error while reading file")
	}
	if buf.Len() > 0 {
		mutation <- buf.String()
	}
	close(mutation)
	wg.Wait()
}
func (n *node) Run() {
	firstRun := true
	ticker := time.NewTicker(time.Second)
	for {
		select {
		case <-ticker.C:
			n.raft.Tick()

		case rd := <-n.raft.Ready():
			x.Check(n.wal.Store(n.gid, rd.Snapshot, rd.HardState, rd.Entries))
			n.saveToStorage(rd.Snapshot, rd.HardState, rd.Entries)
			rcBytes, err := n.raftContext.Marshal()
			x.Check(err)
			for _, msg := range rd.Messages {
				// NOTE: We can do some optimizations here to drop messages.
				msg.Context = rcBytes
				n.send(msg)
			}

			if !raft.IsEmptySnap(rd.Snapshot) {
				n.processSnapshot(rd.Snapshot)
			}

			if len(rd.CommittedEntries) > 0 {
				x.Trace(n.ctx, "Found %d committed entries", len(rd.CommittedEntries))
			}
			for _, entry := range rd.CommittedEntries {
				// Just queue up to be processed. Don't wait on them.
				n.commitCh <- entry
			}

			n.raft.Advance()
			if firstRun && n.canCampaign {
				go n.raft.Campaign(n.ctx)
				firstRun = false
			}

		case <-n.done:
			return
		}
	}
}
func (n *node) send(m raftpb.Message) {
	x.AssertTruef(n.id != m.To, "Sending message to itself")
	data, err := m.Marshal()
	x.Check(err)
	if m.Type != raftpb.MsgHeartbeat && m.Type != raftpb.MsgHeartbeatResp {
		fmt.Printf("\t\tSENDING: %v %v-->%v\n", m.Type, m.From, m.To)
	}
	select {
	case n.messages <- sendmsg{to: m.To, data: data}:
		// pass
	default:
		log.Fatalf("Unable to push messages to channel in send")
	}
}
func (w *grpcWorker) applyMessage(ctx context.Context, msg raftpb.Message) error {
	var rc task.RaftContext
	x.Check(rc.Unmarshal(msg.Context))
	node := groups().Node(rc.Group)
	node.Connect(msg.From, rc.Addr)

	c := make(chan error, 1)
	go func() { c <- node.Step(ctx, msg) }()

	select {
	case <-ctx.Done():
		return ctx.Err()
	case err := <-c:
		return err
	}
}
// ParseGroupConfig parses the config file and stores the predicate <-> group map.
func ParseGroupConfig(file string) error {
	cf, err := os.Open(file)
	if os.IsNotExist(err) {
		groupConfig.n = 1
		groupConfig.k = 1
		return nil
	}
	x.Check(err)
	defer cf.Close()

	if err = ParseConfig(cf); err != nil {
		return err
	}
	if groupConfig.n == 0 {
		return fmt.Errorf("Can't take modulo 0.")
	}
	return nil
}
func (n *node) AddToCluster(ctx context.Context, pid uint64) error {
	addr := n.peers.Get(pid)
	x.AssertTruef(len(addr) > 0, "Unable to find conn pool for peer: %d", pid)
	rc := &task.RaftContext{
		Addr:  addr,
		Group: n.raftContext.Group,
		Id:    pid,
	}
	rcBytes, err := rc.Marshal()
	x.Check(err)
	return n.raft.ProposeConfChange(ctx, raftpb.ConfChange{
		ID:      pid,
		Type:    raftpb.ConfChangeAddNode,
		NodeID:  pid,
		Context: rcBytes,
	})
}
func (n *node) process(e raftpb.Entry, pending chan struct{}) {
	if e.Type != raftpb.EntryNormal {
		return
	}

	pending <- struct{}{} // This will block until we can write to it.
	var proposal task.Proposal
	x.Check(proposal.Unmarshal(e.Data))

	var err error
	if proposal.Mutations != nil {
		err = n.processMutation(e, proposal.Mutations)
	} else if proposal.Membership != nil {
		err = n.processMembership(e, proposal.Membership)
	}
	n.props.Done(proposal.Id, err)
	<-pending // Release one.
}
func doGen() {
	var numHits int
	for _, name := range gNames {
		tmp, err := normalize([]byte(name))
		x.Check(err)
		name = string(tmp)

		tokens := strings.Split(name, " ")
		var found1, found2 bool
		for _, t := range tokens {
			if t == "good" {
				found1 = true
			} else if t == "bad" {
				found2 = true
			}
		}
		if found1 || found2 {
			numHits++
		}
	}
	fmt.Printf("num gen hits %d\n", numHits)
}
func (n *node) joinPeers() {
	// Get leader information for MY group.
	pid, paddr := groups().Leader(n.gid)
	n.Connect(pid, paddr)
	fmt.Printf("Connected with: %v\n", paddr)

	addr := n.peers.Get(pid)
	pool := pools().get(addr)
	x.AssertTruef(pool != nil, "Unable to find conn pool for peer: %d", pid)

	// Bring the instance up to speed first.
	_, err := populateShard(n.ctx, pool, 0)
	x.Checkf(err, "Error while populating shard")

	conn, err := pool.Get()
	x.Check(err)
	defer pool.Put(conn)

	c := NewWorkerClient(conn)
	x.Printf("Calling JoinCluster")
	_, err = c.JoinCluster(n.ctx, n.raftContext)
	x.Checkf(err, "Error while joining cluster")
	x.Printf("Done with JoinCluster call\n")
}