// Main starts a doozer node. If attachAddr is empty, this node
// bootstraps a brand-new cluster by seeding its own store; otherwise it
// dials an existing node at attachAddr, registers itself, and joins.
// It then serves client traffic on listener, the optional web UI on
// webListener, and finally blocks in the paxos UDP loop on udpConn.
func Main(clusterName, attachAddr string, udpConn net.PacketConn, listener, webListener net.Listener) {
	logger := util.NewLogger("main")
	var err os.Error
	listenAddr := listener.Addr().String()
	outs := make(paxos.ChanPutCloserTo) // outbound paxos messages
	// cal is closed once this node is an active consensus participant;
	// several goroutines below gate on it.
	cal := make(chan int)
	var cl *client.Client
	self := util.RandId() // this node's random identity
	st := store.New()
	if attachAddr == "" { // we are the only node in a new cluster
		// Seed the store directly with this node's metadata; there is
		// no existing cluster to ask. We are immediately calibrated.
		set(st, "/doozer/info/"+self+"/public-addr", listenAddr, store.Missing)
		set(st, "/doozer/info/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Missing)
		set(st, "/doozer/members/"+self, listenAddr, store.Missing)
		set(st, "/doozer/slot/"+"1", self, store.Missing)
		set(st, "/doozer/leader", self, store.Missing)
		set(st, "/ping", "pong", store.Missing)
		close(cal)
		// Dial ourselves so the checkin loop below has a client.
		cl, err = client.Dial(listenAddr)
		if err != nil {
			panic(err)
		}
	} else {
		// Attach to an existing cluster: publish our metadata there,
		// then join and replay its snapshot into the local store.
		cl, err = client.Dial(attachAddr)
		if err != nil {
			panic(err)
		}

		path := "/doozer/info/" + self + "/public-addr"
		_, err = cl.Set(path, listenAddr, store.Clobber)
		if err != nil {
			panic(err)
		}

		path = "/doozer/info/" + self + "/hostname"
		_, err = cl.Set(path, os.Getenv("HOSTNAME"), store.Clobber)
		if err != nil {
			panic(err)
		}

		joinSeqn, snap, err := cl.Join(self, listenAddr)
		if err != nil {
			panic(err)
		}

		done := make(chan int)
		// Install the snapshot as seqn 1, then pull ops from the
		// remote until we have caught up (done is closed below).
		st.Ops <- store.Op{1, snap}
		go advanceUntil(cl, done)
		go func() {
			// Once synced past joinSeqn+alpha, stop advancing and
			// activate this node as a consensus participant
			// (activate closes cal — TODO confirm; defined elsewhere).
			st.Sync(joinSeqn + alpha)
			close(done)
			activate(st, self, cl, cal)
		}()
		// TODO sink needs a way to pick up missing values if there are any
		// gaps in its sequence
	}
	mg := paxos.NewManager(self, alpha, st, outs)
	if attachAddr == "" {
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		n := <-st.Seqns
		for i := n + 1; i < n+alpha; i++ {
			st.Ops <- store.Op{i, store.Nop}
		}
	}
	// Background maintenance starts only after calibration completes.
	go func() {
		<-cal
		go lock.Clean(st, mg)
		go session.Clean(st, mg)
		go member.Clean(st, mg)
		go gc.Pulse(self, st.Seqns, cl, pulseInterval)
		go gc.Clean(st)
	}()
	sv := &server.Server{udpConn, listenAddr, st, mg, self}
	// Periodic session checkin; cas is threaded through each call so
	// successive checkins update the same session entry.
	go func() {
		cas := store.Missing
		for _ = range time.Tick(checkinInterval) {
			_, cas, err = cl.Checkin(self, cas)
			if err != nil {
				logger.Println(err)
			}
		}
	}()
	// Serve client TCP connections.
	go func() {
		err := sv.Serve(listener, cal)
		if err != nil {
			panic(err)
		}
	}()
	if webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}
	// Main goroutine blocks here pumping paxos traffic over UDP.
	sv.ServeUdp(outs)
}
// Main runs a doozer node. If attachAddr is empty it bootstraps a new
// cluster by seeding its own store and becoming CAL slot 0; otherwise
// it mirrors an existing cluster's state, then activates itself as a
// consensus participant. It serves client traffic on listener and the
// optional web UI on webListener, and blocks in the inbound UDP loop
// on udpConn until the connection is closed.
// pulseInterval, fillDelay, and kickTimeout appear to be nanosecond
// durations (pre-Go1 int64 time convention) — TODO confirm at callers.
func Main(clusterName, attachAddr string, udpConn net.PacketConn, listener, webListener net.Listener, pulseInterval, fillDelay, kickTimeout int64) {
	listenAddr := listener.Addr().String()
	var activateSeqn int64
	useSelf := make(chan bool, 1)
	self := randId()
	st := store.New()

	// Proposer feeds the consensus manager; seqns is buffered to alpha
	// so up to alpha sequence numbers can be outstanding at once.
	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}

	// calSrv starts the background duties of an active consensus
	// participant: lock/session cleanup, liveness pulses, history GC.
	calSrv := func() {
		go lock.Clean(pr, st.Watch(lock.SessGlob))
		go session.Clean(st, pr, time.Tick(sessionPollInterval))
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, 360000, time.Tick(1e9))
	}

	if attachAddr == "" { // we are the only node in a new cluster
		// Seed the store directly with our own metadata and take CAL
		// slot 0; there is no existing cluster to consult.
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		calSrv()
		close(useSelf)
	} else {
		// Attach to an existing cluster: register our metadata there,
		// then mirror its state into the local store.
		cl := client.New("local", attachAddr) // TODO use real cluster name
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", os.Getenv("HOSTNAME"), store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)

		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}

		// Walk the snapshot at rev and watch for events after rev, so
		// no mutation between the two is lost. The watch is started
		// concurrently; the walk runs to completion here.
		walk, err := cl.Walk("/**", &rev, nil, nil)
		if err != nil {
			panic(err)
		}
		watch, err := cl.Watch("/**", rev+1)
		if err != nil {
			panic(err)
		}
		go follow(st.Ops, watch.C)
		follow(st.Ops, walk.C)
		st.Flush()

		// Block until the local store has applied everything up to rev.
		ch, err := st.Wait(rev + 1)
		if err == nil {
			<-ch
		}

		go func() {
			// Become a CAL, then drive the store forward past our
			// activation point via the remote before cutting over to
			// local consensus (close(useSelf) lets the server use us).
			activateSeqn = activate(st, self, cl)
			calSrv()
			advanceUntil(cl, st.Seqns, activateSeqn+alpha)
			err := watch.Cancel()
			if err != nil {
				panic(err)
			}
			close(useSelf)
		}()
	}

	start := <-st.Seqns
	cmw := st.Watch(store.Any)
	in := make(chan consensus.Packet, 50)
	out := make(chan consensus.Packet, 50)
	consensus.NewManager(self, start, alpha, in, out, st.Ops, pr.seqns, pr.props, cmw, fillDelay, st)

	if attachAddr == "" {
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := start + 1; i < start+alpha+1; i++ {
			st.Ops <- store.Op{i, store.Nop}
		}
	}

	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)

	sv := &server.Server{listenAddr, st, pr, self, alpha}
	go sv.Serve(listener, useSelf)

	if webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}

	// Outbound consensus packets: resolve each destination and send it
	// over UDP; errors and short writes are logged and skipped.
	go func() {
		for p := range out {
			addr, err := net.ResolveUDPAddr(p.Addr)
			if err != nil {
				log.Println(err)
				continue
			}

			n, err := udpConn.WriteTo(p.Data, addr)
			if err != nil {
				log.Println(err)
				continue
			}
			if n != len(p.Data) {
				log.Println("packet len too long:", len(p.Data))
				continue
			}
		}
	}()

	// Liveness tracking: peers not heard from within kickTimeout are
	// reported on shun (checked every ival — see liveness.check).
	lv := liveness{
		timeout: kickTimeout,
		ival:    kickTimeout / 2,
		times:   make(map[string]int64),
		self:    self,
		shun:    shun,
	}

	// Inbound UDP loop: the main goroutine's steady state.
	for {
		t := time.Nanoseconds()

		buf := make([]byte, maxUDPLen)
		n, addr, err := udpConn.ReadFrom(buf)
		if err == os.EINVAL {
			// presumably udpConn was closed out from under us — treat
			// as shutdown. TODO confirm EINVAL is the close signal here.
			return
		}
		if err != nil {
			log.Println(err)
			continue
		}

		buf = buf[:n]

		// Update liveness time stamp for this addr
		lv.times[addr.String()] = t
		lv.check(t)

		in <- consensus.Packet{addr.String(), buf}
	}
}