// Main takes care of essential setup of the proposer, gc, and server.
func Main(
	clusterName string,
	name string,
	cl *doozer.Conn,
	listener net.Listener,
	webListener net.Listener,
	st *store.Store,
	history int64,
) {
	var (
		canWrite = make(chan bool, 1)
		p        = &proposer{
			proposalc: make(chan proposal),
			store:     st,
		}
	)

	listenAddr := listener.Addr().String()

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown"
	}

	// Record this node's identity and claim the first CAL slot.
	set(st, "/ctl/name", clusterName, store.Missing)
	set(st, "/ctl/node/"+name+"/addr", listenAddr, store.Missing)
	set(st, "/ctl/node/"+name+"/hostname", hostname, store.Missing)
	set(st, "/ctl/cal/0", name, store.Missing)

	if webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}

	// Sequential handling of mutations.
	go p.process()

	// Discard history beyond the configured horizon, once per second.
	go gc.Clean(st, history, time.Tick(1e9))

	canWrite <- true
	server.ListenAndServe(listener, canWrite, st, p, "", "", name)
}
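
// The block below is an illustrative sketch only, not part of this package:
// one way a caller might build the listeners and store and hand them to Main
// to bootstrap a single-node cluster. The addresses, the history horizon, and
// the assumption that store.New returns an empty *store.Store are examples,
// not requirements of this API.
//
//	l, err := net.Listen("tcp", "127.0.0.1:8046")
//	if err != nil {
//		panic(err)
//	}
//	w, err := net.Listen("tcp", "127.0.0.1:8080")
//	if err != nil {
//		panic(err)
//	}
//	st := store.New()
//	// cl is nil: this node is not attaching to an existing cluster.
//	Main("example", "node0", nil, l, w, st, 360000)
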
// Main wires up the proposer, consensus manager, garbage collection, web
// interface, and protocol server. If cl is nil, it bootstraps a new cluster
// with this node as the sole member; otherwise it syncs the store from the
// existing cluster reachable through cl and then joins it.
func Main(
	clusterName, self, buri, rwsk, rosk string,
	cl *doozer.Conn,
	udpConn *net.UDPConn,
	listener, webListener net.Listener,
	pulseInterval, fillDelay, kickTimeout, hi int64,
	st *store.Store,
) {
	listenAddr := listener.Addr().String()

	canWrite := make(chan bool, 1)
	in := make(chan consensus.Packet, 1024)

	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}

	// calSrv starts the services this node needs once it becomes a CAL
	// member: pulse, history gc, and the consensus manager.
	calSrv := func(start int64) {
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, hi, time.Tick(1e9))

		var m consensus.Manager
		m.Self = self
		m.DefRev = start
		m.Alpha = alpha
		m.In = in
		m.Out = in
		m.Ops = st.Ops
		m.PSeqn = pr.seqns
		m.Props = pr.props
		m.TFill = fillDelay
		m.Store = st
		m.Ticker = time.Tick(10e6)
		go m.Run()
	}

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown"
	}

	if cl == nil { // we are the only node in a new cluster
		set(st, "/ctl/name", clusterName, store.Missing)
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", hostname, store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		if buri == "" {
			set(st, "/ctl/ns/"+clusterName+"/"+self, listenAddr, store.Missing)
		}

		calSrv(<-st.Seqns)

		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := 0; i < alpha; i++ {
			st.Ops <- store.Op{1 + <-st.Seqns, store.Nop}
		}

		canWrite <- true
		go setReady(pr, self)
	} else {
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", hostname, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)

		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}

		stop := make(chan bool, 1)
		go follow(st, cl, rev+1, stop)

		// Copy a snapshot of the existing cluster's tree into the
		// local store before joining.
		err = doozer.Walk(cl, rev, "/", func(path string, f *doozer.FileInfo, e error) (err error) {
			if f.IsDir {
				return
			}
			if e != nil {
				return e
			}

			// store.Clobber is okay here because the event
			// has already passed through another store
			body, _, err := cl.Get(path, &f.Rev)
			if err != nil {
				return
			}
			mut := store.MustEncodeSet(path, string(body), store.Clobber)
			st.Ops <- store.Op{f.Rev, mut}
			return
		})
		if err != nil {
			panic(err)
		}
		st.Flush()

		ch, err := st.Wait(store.Any, rev+1)
		if err == nil {
			<-ch
		}

		go func() {
			n := activate(st, self, cl)
			calSrv(n)
			advanceUntil(cl, st.Seqns, n+alpha)
			stop <- true
			canWrite <- true
			go setReady(pr, self)
			if buri != "" {
				b, err := doozer.DialUri(buri, "")
				if err != nil {
					panic(err)
				}
				setC(
					b,
					"/ctl/ns/"+clusterName+"/"+self,
					listenAddr,
					store.Missing,
				)
			}
		}()
	}

	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)

	go server.ListenAndServe(listener, canWrite, st, pr, rwsk, rosk, self)

	if rwsk == "" && rosk == "" && webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}

	select {}
}
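
// An illustrative sketch, not part of this package: one way a caller might
// join an existing cluster through the longer Main. The addresses, the
// interval and horizon values, and the use of store.New are assumptions made
// for the example; doozer.DialUri is the same client call Main itself uses
// for the boot URI.
//
//	l, err := net.Listen("tcp", ":8046")
//	if err != nil {
//		panic(err)
//	}
//	u, err := net.ListenUDP("udp", &net.UDPAddr{Port: 8046})
//	if err != nil {
//		panic(err)
//	}
//	cl, err := doozer.DialUri("doozer:?ca=10.0.0.1:8046", "")
//	if err != nil {
//		panic(err)
//	}
//	Main(
//		"example", "node1",
//		"", "", "", // buri, rwsk, rosk: no boot namespace, no auth
//		cl, u, l, nil, // join via cl; no web listener
//		1e9, 2e9, 3e9, // pulseInterval, fillDelay, kickTimeout (ns)
//		360000, // hi: history horizon
//		store.New(),
//	)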