func dial() *doozer.Conn {
	c, err := doozer.DialUri(*uri, *buri)
	if err != nil {
		bail(err)
	}
	return c
}
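// bail is called above but not shown. A minimal sketch of what such a helper
// presumably does (name taken from the call site, behavior assumed; requires
// the fmt and os packages):
func bail(err error) {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}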
func runReplicate(cmd *Command) error {
	if len(cmd.Args) != 1 {
		cmd.Usage()
	}

	var (
		dst    = cmd.Conn
		root   = cmd.Globals.Root
		errc   = make(chan error)
		entryc = make(chan *journal.Entry)
		srcURI = cmd.Args[0]
	)

	// connect to source
	src, err := doozer.DialUri(srcURI, "")
	if err != nil {
		return fmt.Errorf("unable to connect to source '%s': %s", srcURI, err)
	}

	// get src rev for snapshot and streaming updates
	rev, err := src.Rev()
	if err != nil {
		return fmt.Errorf("unable to get rev from source '%s': %s", srcURI, err)
	}

	// check for lock existence
	_, prev, err := dst.Stat(lockPath, &cmd.Rev)
	if err != nil {
		return fmt.Errorf("unable to check for lock '%s': %s", lockPath, err)
	}
	if prev != 0 {
		return fmt.Errorf("lock exists")
	}

	// acquire lock
	_, err = dst.Set(lockPath, cmd.Rev, []byte(time.Now().String()))
	if err != nil {
		return fmt.Errorf("unable to set lock '%s': %s", lockPath, err)
	}

	// start writer
	go writer(dst, root, entryc, errc)

	// snapshot by walking src tree
	err = doozer.Walk(src, rev, root, walk(src, rev, entryc))
	if err != nil {
		return fmt.Errorf("unable to walk source '%s': %s", srcURI, err)
	}

	// stream updates
	go wait(src, rev, root, entryc, errc)

	return <-errc
}
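// runReplicate sets lockPath but never deletes it. A caller that wants to
// release the lock once replication stops could do so explicitly; a minimal
// sketch (the helper name is hypothetical, not from the source):
func releaseLock(dst *doozer.Conn, lockPath string) error {
	// delete at the current cluster revision
	rev, err := dst.Rev()
	if err != nil {
		return err
	}
	return dst.Del(lockPath, rev)
}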
// DialUri calls doozer.DialUri and returns a Snapshot of the coordinator cluster
// at the latest revision.
func DialUri(uri string, root string) (s Snapshot, err error) {
	dconn, err := doozer.DialUri(uri, "")
	if err != nil {
		return
	}
	rev, err := dconn.Rev()
	if err != nil {
		return
	}
	s = Snapshot{rev, &conn{uri, root, dconn}}
	return
}
// DialUri calls doozer.DialUri and returns a Snapshot
// of the coordinator cluster at the latest revision.
func DialUri(uri string, root string) (Snapshot, error) {
	dconn, err := doozer.DialUri(uri, "")
	if err != nil {
		return Snapshot{}, err
	}
	rev, err := dconn.Rev()
	if err != nil {
		return Snapshot{}, err
	}
	c := &conn{uri, root, dconn}
	go c.heartbeat(DefaultHeartbeatInterval, DefaultHeartbeatTimeout)
	return Snapshot{rev, c}, nil
}
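// A minimal usage sketch of the heartbeat-enabled DialUri above; the URI,
// root path, and error handling are illustrative only:
func exampleDial() {
	s, err := DialUri("doozer:?ca=localhost:8046", "/myapp")
	if err != nil {
		panic(err) // illustrative only
	}
	_ = s // s is pinned to the revision that was current at dial time
}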
func main() {
	var (
		file = flag.String("file", "./doozerd.log", "location of the journal file")
		root = flag.String("root", "/", "root to operate from")
		uri  = flag.String("uri", "doozer:?ca=localhost:8046", "doozerd cluster uri")
	)
	flag.Parse()

	globals := Globals{
		File: *file,
		Root: *root,
		URI:  *uri,
	}
	flag.Usage = usage(globals)

	if len(flag.Args()) < 1 {
		flag.Usage()
	}

	for _, cmd := range commands {
		if cmd.Name == flag.Args()[0] && cmd.Run != nil {
			conn, err := doozer.DialUri(*uri, "")
			if err != nil {
				log.Fatalf("Error connecting to %s: %s\n", *uri, err)
			}
			rev, err := conn.Rev()
			if err != nil {
				log.Fatalf("Unable to get revision: %s\n", err)
			}

			cmd.Args = flag.Args()[1:]
			cmd.Conn = conn
			cmd.Globals = globals
			cmd.Rev = rev

			if err := cmd.Run(cmd); err != nil {
				log.Fatal(err)
			}
			return
		}
	}

	fmt.Fprintf(os.Stderr, "Unknown command %#q\n\n", flag.Args()[0])
	flag.Usage()
}
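// The Globals and Command types that main relies on are defined elsewhere.
// This sketch is inferred from the fields and methods the code above and
// runReplicate actually touch; it is a guess, not the actual definitions:
type Globals struct {
	File string
	Root string
	URI  string
}

type Command struct {
	Name    string
	Run     func(*Command) error
	Args    []string
	Conn    *doozer.Conn
	Globals Globals
	Rev     int64
}

// Usage presumably prints per-command help and exits; exact behavior assumed.
func (c *Command) Usage() {
	fmt.Fprintf(os.Stderr, "usage: %s\n", c.Name)
	os.Exit(2)
}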
func boot(name, id, laddr, buri string) *doozer.Conn {
	b, err := doozer.DialUri(buri, "")
	if err != nil {
		panic(err)
	}
	err = b.Access(rwsk)
	if err != nil {
		panic(err)
	}
	cl := lookupAndAttach(b, name)
	if cl == nil {
		return elect(name, id, laddr, b)
	}
	return cl
}
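// lookupAndAttach is not shown. A plausible sketch based on the
// /ctl/ns/<name> layout that Main (below) writes member addresses into;
// every detail here is an assumption, not the actual implementation:
func lookupAndAttachSketch(b *doozer.Conn, name string) *doozer.Conn {
	rev, err := b.Rev()
	if err != nil {
		return nil
	}
	path := "/ctl/ns/" + name
	ids, err := b.Getdir(path, rev, 0, -1)
	if err != nil || len(ids) == 0 {
		return nil // no cluster registered under this name
	}
	// dial the first advertised address; a robust version would try each
	addr, _, err := b.Get(path+"/"+ids[0], &rev)
	if err != nil {
		return nil
	}
	c, err := doozer.Dial(string(addr))
	if err != nil {
		return nil
	}
	return c
}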
// Main boots a doozerd node: it either seeds a brand-new cluster or joins an
// existing one via cl, then serves requests until the process exits.
func Main(
	clusterName, self, buri, rwsk, rosk string,
	cl *doozer.Conn,
	udpConn *net.UDPConn,
	listener, webListener net.Listener,
	pulseInterval, fillDelay, kickTimeout, hi int64,
	st *store.Store,
) {
	listenAddr := listener.Addr().String()

	canWrite := make(chan bool, 1)
	in := make(chan consensus.Packet, 1024)

	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}

	calSrv := func(start int64) {
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, hi, time.Tick(1e9))
		var m consensus.Manager
		m.Self = self
		m.DefRev = start
		m.Alpha = alpha
		m.In = in
		m.Out = in
		m.Ops = st.Ops
		m.PSeqn = pr.seqns
		m.Props = pr.props
		m.TFill = fillDelay
		m.Store = st
		m.Ticker = time.Tick(10e6)
		go m.Run()
	}

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown"
	}

	if cl == nil {
		// we are the only node in a new cluster
		set(st, "/ctl/name", clusterName, store.Missing)
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", hostname, store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		if buri == "" {
			set(st, "/ctl/ns/"+clusterName+"/"+self, listenAddr, store.Missing)
		}
		calSrv(<-st.Seqns)
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := 0; i < alpha; i++ {
			st.Ops <- store.Op{1 + <-st.Seqns, store.Nop}
		}
		canWrite <- true
		go setReady(pr, self)
	} else {
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", hostname, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)

		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}

		stop := make(chan bool, 1)
		go follow(st, cl, rev+1, stop)

		err = doozer.Walk(cl, rev, "/", func(path string, f *doozer.FileInfo, e error) (err error) {
			// check e before touching f; f may not be valid on error
			if e != nil {
				return e
			}
			if f.IsDir {
				return
			}
			// store.Clobber is okay here because the event
			// has already passed through another store
			body, _, err := cl.Get(path, &f.Rev)
			if err != nil {
				return
			}
			mut := store.MustEncodeSet(path, string(body), store.Clobber)
			st.Ops <- store.Op{f.Rev, mut}
			return
		})
		if err != nil {
			panic(err)
		}

		st.Flush()

		ch, err := st.Wait(store.Any, rev+1)
		if err == nil {
			<-ch
		}

		go func() {
			n := activate(st, self, cl)
			calSrv(n)
			advanceUntil(cl, st.Seqns, n+alpha)
			stop <- true
			canWrite <- true
			go setReady(pr, self)
			if buri != "" {
				b, err := doozer.DialUri(buri, "")
				if err != nil {
					panic(err)
				}
				setC(
					b,
					"/ctl/ns/"+clusterName+"/"+self,
					listenAddr,
					store.Missing,
				)
			}
		}()
	}

	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)

	go server.ListenAndServe(listener, canWrite, st, pr, rwsk, rosk, self)

	if rwsk == "" && rosk == "" && webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}

	select {}
}
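// Main relies on set and setC helpers that are not shown. A sketch consistent
// with how they are called above (signatures inferred, not from the source):

// set applies a mutation to the local store, as in the new-cluster branch.
func set(st *store.Store, path, body string, rev int64) {
	mut := store.MustEncodeSet(path, body, rev)
	st.Ops <- store.Op{1 + <-st.Seqns, mut}
}

// setC writes through an existing cluster connection instead.
func setC(cl *doozer.Conn, path, body string, rev int64) {
	_, err := cl.Set(path, rev, []byte(body))
	if err != nil {
		panic(err)
	}
}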