Example #1
func find(path string) {
	c := dial()

	// If no revision was given on the command line, pin the walk to
	// the store's current revision.
	if *rrev == -1 {
		var err error
		*rrev, err = c.Rev()
		if err != nil {
			bail(err)
		}
	}

	// Walk the tree in a separate goroutine; closing v signals that
	// the walk is complete.
	v := make(vis)
	errs := make(chan error)
	go func() {
		doozer.Walk(c, *rrev, path, v, errs)
		close(v)
	}()

	// Print each path as it arrives, reporting errors until the walk ends.
	for {
		select {
		case path, ok := <-v:
			if !ok {
				return
			}
			fmt.Println(path)
		case err := <-errs:
			fmt.Fprintln(os.Stderr, err)
		}
	}
}
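
The vis type is not shown in the example. Below is a minimal sketch of how such a visitor might be defined, assuming doozer's Visitor interface follows the old path/filepath pattern (VisitDir reports whether to descend, VisitFile handles leaves); treat it as an illustration, not the example's actual source.

// Assumed definition: a channel that implements doozer.Visitor by
// forwarding every file path it visits.
type vis chan string

// VisitDir returns true so the walk descends into every directory.
func (v vis) VisitDir(path string, f *doozer.FileInfo) bool {
	return true
}

// VisitFile sends each file's path to the channel, where the select
// loop in find prints it.
func (v vis) VisitFile(path string, f *doozer.FileInfo) {
	v <- path
}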
Example #2
func Main(clusterName, self, buri, rwsk, rosk string, cl *doozer.Conn, udpConn *net.UDPConn, listener, webListener net.Listener, pulseInterval, fillDelay, kickTimeout int64, hi int64) {
	listenAddr := listener.Addr().String()

	canWrite := make(chan bool, 1)
	in := make(chan consensus.Packet, 50)
	out := make(chan consensus.Packet, 50)

	st := store.New()
	pr := &proposer{
		seqns: make(chan int64, alpha),
		props: make(chan *consensus.Prop),
		st:    st,
	}

	// calSrv starts the garbage-collection helpers and the consensus
	// manager once this node knows its starting revision.
	calSrv := func(start int64) {
		go gc.Pulse(self, st.Seqns, pr, pulseInterval)
		go gc.Clean(st, hi, time.Tick(1e9)) // sweep once per second
		var m consensus.Manager
		m.Self = self
		m.DefRev = start
		m.Alpha = alpha
		m.In = in
		m.Out = out
		m.Ops = st.Ops
		m.PSeqn = pr.seqns
		m.Props = pr.props
		m.TFill = fillDelay
		m.Store = st
		m.Ticker = time.Tick(10e6) // drive the manager every 10ms
		go m.Run()
	}

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown"
	}

	if cl == nil { // we are the only node in a new cluster
		set(st, "/ctl/name", clusterName, store.Missing)
		set(st, "/ctl/node/"+self+"/addr", listenAddr, store.Missing)
		set(st, "/ctl/node/"+self+"/hostname", hostname, store.Missing)
		set(st, "/ctl/node/"+self+"/version", Version, store.Missing)
		set(st, "/ctl/cal/0", self, store.Missing)
		if buri == "" {
			set(st, "/ctl/ns/"+clusterName+"/"+self, listenAddr, store.Missing)
		}
		calSrv(<-st.Seqns)
		// Skip ahead alpha steps so that the registrar can provide a
		// meaningful cluster.
		for i := 0; i < alpha; i++ {
			st.Ops <- store.Op{Seqn: 1 + <-st.Seqns, Mut: store.Nop}
		}
		canWrite <- true
		go setReady(pr, self)
	} else {
		setC(cl, "/ctl/node/"+self+"/addr", listenAddr, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/hostname", hostname, store.Clobber)
		setC(cl, "/ctl/node/"+self+"/version", Version, store.Clobber)

		rev, err := cl.Rev()
		if err != nil {
			panic(err)
		}

		// Stream revisions after rev into the local store until this
		// node has caught up and stop is signaled.
		stop := make(chan bool, 1)
		go follow(st, cl, rev+1, stop)

		// Clone the cluster's state as of rev into the local store;
		// any error during the walk is fatal.
		errs := make(chan error)
		go func() {
			e, ok := <-errs
			if ok {
				panic(e)
			}
		}()
		doozer.Walk(cl, rev, "/", cloner{st.Ops, cl, rev}, errs)
		close(errs)
		st.Flush()

		ch, err := st.Wait(store.Any, rev+1)
		if err == nil {
			<-ch
		}

		go func() {
			n := activate(st, self, cl)
			calSrv(n)
			advanceUntil(cl, st.Seqns, n+alpha)
			stop <- true
			canWrite <- true
			go setReady(pr, self)
			if buri != "" {
				b, err := doozer.DialUri(buri, "")
				if err != nil {
					panic(err)
				}
				setC(
					b,
					"/ctl/ns/"+clusterName+"/"+self,
					listenAddr,
					store.Missing,
				)
			}
		}()
	}

	shun := make(chan string, 3) // sufficient for a cluster of 7
	go member.Clean(shun, st, pr)
	go server.ListenAndServe(listener, canWrite, st, pr, rwsk, rosk)

	if rwsk == "" && rosk == "" && webListener != nil {
		web.Store = st
		web.ClusterName = clusterName
		go web.Serve(webListener)
	}

	// Forward outbound consensus packets to their peers over UDP.
	go func() {
		for p := range out {
			n, err := udpConn.WriteTo(p.Data, p.Addr)
			if err != nil {
				log.Println(err)
				continue
			}
			if n != len(p.Data) {
				log.Println("short udp write:", n, "of", len(p.Data), "bytes")
				continue
			}
		}
	}()

	selfAddr, ok := udpConn.LocalAddr().(*net.UDPAddr)
	if !ok {
		panic("no UDP addr")
	}
	lv := liveness{
		timeout: kickTimeout,
		ival:    kickTimeout / 2,
		self:    selfAddr,
		shun:    shun,
	}
	// Receive loop: read each datagram, track peer liveness, and feed
	// the packet to the consensus manager.
	for {
		t := time.Now().UnixNano()

		buf := make([]byte, maxUDPLen)
		n, addr, err := udpConn.ReadFromUDP(buf)
		if err == syscall.EINVAL {
			return // the socket was closed; shut down the receive loop
		}
		if err != nil {
			log.Println(err)
			continue
		}

		buf = buf[:n]

		lv.mark(addr, t)
		lv.check(t)

		in <- consensus.Packet{Addr: addr, Data: buf}
	}
}
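
Example #2 never shows the consensus.Packet type itself. Judging only from its use above (p.Addr handed to WriteTo, p.Data as the datagram payload), it presumably looks roughly like the following; treat this as an inferred sketch, not the authoritative definition from package consensus.

// Inferred from usage in Example #2; the field types are assumptions.
type Packet struct {
	Addr net.Addr // peer the datagram came from, or is destined for
	Data []byte   // raw UDP payload handed to the consensus manager
}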
Example #3
func (c *ServiceClient) monitorInstances() {
	// TODO: watch doozer and keep this list up to date so we don't need
	// to search it every time we spawn a new connection.
	dconn := c.query.DoozerConn // local name avoids shadowing the doozer package

	rev := dconn.GetCurrentRevision()

	ddir := c.query.makePath()

	// Collect the paths of every instance file under the query's path.
	var ifc instanceFileCollector
	errch := make(chan error, 1) // buffered so a failing Walk cannot block
	dconn.Walk(rev, ddir, &ifc, errch)
	select {
	case err := <-errch:
		c.Log.Item(err)
	default:
	}

	for _, file := range ifc.files {
		buf, _, err := dconn.Get(file, rev)
		if err != nil {
			c.Log.Item(err)
			continue
		}
		var s service.Service
		err = json.Unmarshal(buf, &s)
		if err != nil {
			c.Log.Item(err)
			continue
		}

		c.muxChan <- service.ServiceDiscovered{
			Service: &s,
		}
	}

	watchPath := path.Join(c.query.makePath(), "**")

	// Block on updates under the watch path, decoding each event into
	// a Service as it arrives.
	for {
		ev, err := dconn.Wait(watchPath, rev+1)
		if err != nil {
			continue
		}
		rev = ev.Rev

		var s service.Service
		err = json.Unmarshal(ev.Body, &s)
		if err != nil {
			continue
		}

		parts := strings.Split(ev.Path, "/")

		if c.query.pathMatches(parts, ev.Path) {
			//key := s.Config.ServiceAddr.String()

			if s.Registered == true {
				c.muxChan <- service.ServiceDiscovered{
					Service: &s,
				}
			} else {
				c.muxChan <- service.ServiceRemoved{
					Service: &s,
				}
			}
		}
	}
}
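
instanceFileCollector is not defined in the example. Given that &ifc is passed to Walk as a visitor and ifc.files is later ranged over as file paths, a plausible sketch is the following; the method signatures assume doozer's Visitor interface as in Example #1.

// Assumed definition: a doozer.Visitor that records every file path.
type instanceFileCollector struct {
	files []string
}

// VisitDir returns true so the walk descends into every directory.
func (ifc *instanceFileCollector) VisitDir(path string, f *doozer.FileInfo) bool {
	return true
}

// VisitFile appends each file's path for later retrieval via Get.
func (ifc *instanceFileCollector) VisitFile(path string, f *doozer.FileInfo) {
	ifc.files = append(ifc.files, path)
}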
Example #4
func (d *doozerConnection) Walk(rev int64, root string, v doozer.Visitor, errors chan<- error) {
	// TODO: recover from failures here, but make the caller aware of the
	// restart so it doesn't record duplicate entries when the walk starts
	// over again.

	doozer.Walk(d.Connection.(*doozer.Conn), rev, root, v, errors)
}
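
A hypothetical caller of this wrapper, for illustration only; d, rev, and the instanceFileCollector sketch from Example #3 are assumed, not part of the example's source.

// Buffer the error channel so a single failure inside Walk cannot
// block forever, since this wrapper runs the walk synchronously.
errs := make(chan error, 1)
var ifc instanceFileCollector
d.Walk(rev, "/services", &ifc, errs)
select {
case err := <-errs:
	log.Println("walk failed:", err)
default:
}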