Example #1
func doServerWatch(kapi client.KeysAPI) {

	watcher := kapi.Watcher(runningbase, &client.WatcherOptions{Recursive: true})

	for {
		resp, err := watcher.Next(context.TODO())

		if err != nil {
			if _, ok := err.(*client.ClusterError); ok {
				continue
			}
			log.Fatal(err)
		}

		fmt.Println(resp.Node.Key + " " + resp.Node.Value)

		_, server := path.Split(resp.Node.Key)
		switch resp.Action {
		case "create":
			fmt.Println(server + " has started heart beat")
		case "compareAndSwap":
			fmt.Println(server + " heart beat")
		case "compareAndDelete":
			fmt.Println(server + " has shut down correctly")
		case "expire":
			fmt.Println("*** " + server + " has missed heartbeat")
		default:
			fmt.Println("Didn't handle " + resp.Action)
		}
	}

}
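For context, a minimal wiring sketch (not part of the original example; the endpoint is a placeholder and the action mapping is inferred from etcd v2 semantics):
cfg := client.Config{Endpoints: []string{"http://127.0.0.1:2379"}}
c, err := client.New(cfg)
if err != nil {
	log.Fatal(err)
}
kapi := client.NewKeysAPI(c)
go doServerWatch(kapi)

// The actions handled above correspond to writes like these:
// "create":           Set with PrevExist: client.PrevNoExist and a TTL
// "compareAndSwap":   Set with PrevValue (a CAS heartbeat refresh)
// "compareAndDelete": Delete with PrevValue (a clean shutdown)
// "expire":           no write at all - the key's TTL lapsed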
Example #2
func watch(kAPI etcd.KeysAPI, key string, stop chan struct{}) (res *etcd.Response) {
	for res == nil {
		select {
		case <-stop:
			log.Debugf("Gracefully closing etcd watch loop: key=%s", key)
			return
		default:
			opts := &etcd.WatcherOptions{
				AfterIndex: 0,
				Recursive:  true,
			}
			watcher := kAPI.Watcher(key, opts)
			log.Debugf("Creating etcd watcher: %s", key)

			var err error
			res, err = watcher.Next(context.Background())
			if err != nil {
				log.Errorf("etcd watcher %v returned error: %v", key, err)
				// Don't slam the etcd server when we know an
				// unexpected error occurred.
				time.Sleep(time.Second)
			}
		}
	}

	return
}
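A sketch of a possible caller (the key is a placeholder). Note that watcher.Next blocks inside the default branch, so closing stop only takes effect between events or errors:
func runWatch(kAPI etcd.KeysAPI) {
	stop := make(chan struct{})
	go func() {
		if res := watch(kAPI, "/services/web", stop); res != nil {
			log.Debugf("first change: %s %s", res.Action, res.Node.Key)
		}
	}()
	// ... later, to end the loop:
	close(stop)
}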
Example #3
func watchKey(key string, blockControlChan chan blocker.ControlMsg, kapi client.KeysAPI) {
	watcher := kapi.Watcher(key, &client.WatcherOptions{Recursive: true})

	for {
		response, err := watcher.Next(context.Background())
		if err != nil {
			log.Println("[sync]\tetcd: watch error: " + err.Error())
			continue
		}
		switch response.Action {
		case "create":
			// counter: sync.set
			handleKey(*response.Node, blockControlChan, true)
			log.Println("[sync]\tetcd: create: " + response.Node.Key)
		case "set":
			// counter: sync.set
			handleKey(*response.Node, blockControlChan, true)
			log.Println("[sync]\tetcd: create: " + response.Node.Key)
		case "delete":
			// counter: sync.delete
			handleKey(*response.Node, blockControlChan, false)
			log.Println("[sync]\tetcd: delete: " + response.Node.Key)
		case "expire":
			// counter: sync.expire
			handleKey(*response.Node, blockControlChan, false)
			log.Println("[sync]\tetcd: expired: " + response.Node.Key)
		}
	}
}
Example #4
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer util.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)
	if resourceVersion == 0 {
		latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.etcdIncoming)
		if err != nil {
			w.etcdError <- err
			return
		}
		resourceVersion = latest
	}

	opts := etcd.WatcherOptions{
		Recursive:  w.list,
		AfterIndex: resourceVersion,
	}
	watcher := client.Watcher(key, &opts)
	w.stopLock.Lock()
	w.ctx, w.cancel = context.WithCancel(ctx)
	w.stopLock.Unlock()

	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
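The etcdWatcher fields are not shown in the example; this shape is inferred from their use above and is an assumption, not the original Kubernetes definition:
type etcdWatcher struct {
	list         bool
	stopLock     sync.Mutex
	ctx          context.Context
	cancel       context.CancelFunc
	etcdIncoming chan *etcd.Response
	etcdError    chan error
}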
Example #5
// non-blocking
func mustWatchServiceDefs(ctx context.Context, client etcd.KeysAPI, basepath *string, changed chan<- bool) {
	wOpts := &etcd.WatcherOptions{Recursive: true}
	watcher := client.Watcher(*basepath, wOpts)

	watchOperation := func() error {
		resp, err := watcher.Next(ctx)
		if err != nil {
			switch v := err.(type) {
			case etcd.Error:
				if v.Code == etcd.ErrorCodeEventIndexCleared {
					watcher = client.Watcher(*basepath, wOpts)

					log.WithFields(log.Fields{
						"basepath": *basepath,
						"code":     v.Code,
						"cause":    v.Cause,
						"index":    v.Index,
						"message":  v.Message,
					}).Warn("refreshed watcher")

					return nil
				}
				// Any other etcd error: return it so the backoff loop retries,
				// instead of falling through and dereferencing a nil resp.
				return err
			default:
				if err.Error() == "unexpected end of JSON input" {
					log.WithField("error", err).Warn("probably a connection timeout. are we in etcd 0.4.x?")
					return nil
				}
				return err
			}
		}

		if resp.Action != "get" {
			changed <- true
		}

		return nil
	}

	notify := func(err error, dur time.Duration) {
		log.WithFields(log.Fields{
			"dur":          dur,
			"error":        err,
			"service_path": *basepath,
		}).Error("service definition watch failed. backing off.")
	}

	go func() {
		for {
			err := backoff.RetryNotify(watchOperation, backoff.NewExponentialBackOff(), notify)
			if err != nil {
				log.WithFields(log.Fields{
					"error":        err,
					"service_path": *basepath,
				}).Fatal("unable to recover communication with etcd, watch abandoned")
			}
		}
	}()
}
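A sketch of a consumer (the base path is a placeholder); every value received on changed means the definitions under basepath should be re-read:
func watchAndReload(kapi etcd.KeysAPI) {
	basepath := "/services"
	changed := make(chan bool)
	mustWatchServiceDefs(context.Background(), kapi, &basepath, changed)
	for range changed {
		log.Info("service definitions changed; re-reading from etcd")
	}
}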
Example #6
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer utilruntime.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)

	// All calls to etcd are coming from this function - once it is finished
	// no other call to etcd should be generated by this watcher.
	done := func() {}

	// We must be prepared for Stop() to be called at any time,
	// potentially even before this function is called.
	// If that is the case, we simply skip all the code here.
	// See #18928 for more details.
	var watcher etcd.Watcher
	returned := func() bool {
		w.stopLock.Lock()
		defer w.stopLock.Unlock()
		if w.stopped {
			// Watcher has already been stopped - don't even initiate it here.
			return true
		}
		w.wg.Add(1)
		done = w.wg.Done
		// Perform initialization of watcher under lock - we want to avoid situation when
		// Stop() is called in the meantime (which in tests can cause etcd termination and
		// strange behavior here).
		if resourceVersion == 0 {
			latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming)
			if err != nil {
				w.etcdError <- err
				return true
			}
			resourceVersion = latest
		}

		opts := etcd.WatcherOptions{
			Recursive:  w.list,
			AfterIndex: resourceVersion,
		}
		watcher = client.Watcher(key, &opts)
		w.ctx, w.cancel = context.WithCancel(ctx)
		return false
	}()
	defer done()
	if returned {
		return
	}

	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
Example #7
func generateConfigWatcher(kapi client.KeysAPI, resp *client.Response) (*client.Response, error) {

	watcher := kapi.Watcher(*etcdPrefix, &client.WatcherOptions{Recursive: true, AfterIndex: resp.Index})
	ctx := context.Background()

	resp, err := watcher.Next(ctx)
	if err != nil {
		return resp, err
	}
	return generateConfig(kapi)

}
Example #8
func watch(kapi client.KeysAPI, key string, fn callback) {
	watcher := kapi.Watcher(key, nil)

	for {
		resp, err := watcher.Next(context.TODO())
		if err != nil {
			log.Printf("!! ERR: %v\n", err)
			break
		}
		if err := fn(resp.Node); err != nil {
			// We got an outdated node; trigger a get to
			// ensure we see the latest value.
			get(kapi, key, fn)
		}
	}

}
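The callback type and the get helper are not shown; a plausible shape, marked as an assumption, would be:
// Hypothetical definitions matching how watch() uses them.
type callback func(node *client.Node) error

func get(kapi client.KeysAPI, key string, fn callback) {
	resp, err := kapi.Get(context.TODO(), key, nil)
	if err != nil {
		log.Printf("!! ERR: %v\n", err)
		return
	}
	_ = fn(resp.Node)
}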
Example #9
func watchService(ctx *done.Context, api client.KeysAPI, space *Namespace) {
	dir := watchDir(space.name)
	opts := &client.WatcherOptions{Recursive: true}
	watcher := api.Watcher(dir, opts)

	log.Infof("watch %s start", dir)
	for {
		update(watcher, space)
		select {
		case <-ctx.Done():
			log.Infof("watch %s done", dir)
			ctx.OK()
			return
		default:
			continue
		}
	}
}
Example #10
func waitForEtcd(api client.KeysAPI, c *cli.Context) error {
	_, err := api.Get(context.Background(), c.String("key-watch"), nil)
	if err != nil {
		// key is not present; we have to watch it
		if m, _ := regexp.MatchString("100", err.Error()); m {
			w := api.Watcher(c.String("key-watch"), &client.WatcherOptions{AfterIndex: 0, Recursive: false})
			_, err := w.Next(context.Background())
			if err != nil {
				return err
			}
			return nil
		}
		return err
	}
	// key is already present
	return nil
}
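The regexp match on "100" works because etcd's "Key not found" error code (client.ErrorCodeKeyNotFound, value 100) surfaces in the error text. The typed error from the same client package makes the check direct:
if cerr, ok := err.(client.Error); ok && cerr.Code == client.ErrorCodeKeyNotFound {
	// key is not present; fall through to the watch
}
// or, more simply:
if client.IsKeyNotFound(err) {
	// ...
}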
Example #11
func doServer(kapi client.KeysAPI) {
	var key = configbase + *servername

	settings := make(map[string]string)

	resp, err := kapi.Get(context.TODO(), key, &client.GetOptions{Recursive: true})
	if err != nil {
		log.Fatal(err)
	}

	for _, node := range resp.Node.Nodes {
		_, setting := path.Split(node.Key)
		settings[setting] = node.Value
	}

	fmt.Println(settings)

	watcher := kapi.Watcher(key, &client.WatcherOptions{Recursive: true})

	for {
		resp, err := watcher.Next(context.TODO())

		if err != nil {
			if _, ok := err.(*client.ClusterError); ok {
				continue
			}
			log.Fatal(err)
		}

		switch resp.Action {
		case "set":
			_, setting := path.Split(resp.Node.Key)
			settings[setting] = resp.Node.Value
		case "delete", "expire":
			_, setting := path.Split(resp.Node.Key)
			delete(settings, setting)
		}

		fmt.Println(settings)
	}

}
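Writes that the loop above will observe, for illustration (the key layout is a placeholder):
_, _ = kapi.Set(context.TODO(), key+"/loglevel", "debug", nil) // action "set"
_, _ = kapi.Set(context.TODO(), key+"/session", "x",
	&client.SetOptions{TTL: 5 * time.Second}) // action "expire" once the TTL lapses
_, _ = kapi.Delete(context.TODO(), key+"/loglevel", nil) // action "delete"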
Example #12
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	recursive := c.Bool("recursive")
	forever := c.Bool("forever")
	index := 0
	if c.Int("after-index") != 0 {
		index = c.Int("after-index") + 1
	}

	stop := false
	w := ki.Watcher(key, &client.WatcherOptions{AfterIndex: uint64(index), Recursive: recursive})

	sigch := make(chan os.Signal, 1)
	signal.Notify(sigch, os.Interrupt)

	go func() {
		<-sigch
		os.Exit(0)
	}()

	for !stop {
		resp, err := w.Next(context.TODO())
		if err != nil {
			handleError(ExitServerError, err)
		}
		if resp.Node.Dir {
			continue
		}
		if recursive {
			fmt.Printf("[%s] %s\n", resp.Action, resp.Node.Key)
		}

		printResponseKey(resp, c.GlobalString("output"))

		if !forever {
			stop = true
		}
	}
}
Example #13
func OnUpdateIFace(etcdCli etcd.Client, key string, fn func(val interface{})) {
	var kapi etcd.KeysAPI
	var ctx context.Context

	kapi = etcd.NewKeysAPI(etcdCli)
	ctx = context.Background()

	go func(w etcd.Watcher) {
		var err error
		var resp *etcd.Response

		for {
			resp, err = w.Next(ctx)
			if err == nil {
				Goose.Updater.Logf(3, "Updating config variable %s = %s", key, resp.Node.Value)
				fn(resp.Node.Value)
			} else {
				Goose.Updater.Logf(1, "Error updating config variable %s (%s)", key, err)
			}
		}
	}(kapi.Watcher("/"+key, nil))
}
Example #14
func newNotifier(kapi client.KeysAPI, path string) notifier {
	w := notifier{make(chan struct{}, 1)}

	go func() {

		for {
			watcher := kapi.Watcher(path, &client.WatcherOptions{Recursive: true})

			var err error
			var response *client.Response
			for err == nil {
				response, err = watcher.Next(context.Background())
				logResponse(response)
				select {
				case w.ch <- struct{}{}:
					log.Println("received event from watcher, sent change message on notifier channel.")
				default:
					log.Println("received event from watcher, not sending message on notifier channel, buffer full and no-one listening.")
				}
			}

			if err == context.Canceled {
				log.Println("context cancelled error")
			} else if err == context.DeadlineExceeded {
				log.Println("deadline exceeded error")
			} else if cerr, ok := err.(*client.ClusterError); ok {
				log.Printf("cluster error. Details: %v\n", cerr.Detail())
			} else {
				// bad cluster endpoints, which are not etcd servers
				log.Println(err.Error())
			}

			log.Println("sleeping for 15s before rebuilding config due to error")
			time.Sleep(15 * time.Second)
		}
	}()

	return w
}
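The notifier type itself is not shown; inferred from its use, it is presumably a one-field struct, and a consumer drains the buffered channel (sketch under that assumption):
// Assumed shape of the type constructed above.
type notifier struct {
	ch chan struct{}
}

// Sketch of a consumer: each message means "something under path changed".
func consumeNotifications(kapi client.KeysAPI, path string) {
	n := newNotifier(kapi, path)
	for range n.ch {
		log.Println("change detected; rebuilding configuration")
	}
}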
Example #15
func OnUpdateTree(etcdCli etcd.Client, key string, fn func(key string, val interface{}, action string)) {
	var kapi etcd.KeysAPI
	var ctx context.Context

	kapi = etcd.NewKeysAPI(etcdCli)
	ctx = context.Background()

	go func(w etcd.Watcher) {
		var err error
		var resp *etcd.Response

		for {
			resp, err = w.Next(ctx)
			if err == nil {
				Goose.Updater.Logf(3, "Updating (%s) config variable %s = %s", resp.Action, key, resp.Node.Value)
				fn(resp.Node.Key, resp.Node.Value, resp.Action)
			} else {
				Goose.Updater.Logf(1, "Error updating config variable %s (%s)", key, err)
			}
		}
	}(kapi.Watcher("/"+key, &etcd.WatcherOptions{Recursive: true}))
}
Example #16
func newWatcher(kapi client.KeysAPI, path string, socksProxy string, etcdPeers []string) watcher {
	w := watcher{make(chan struct{}, 1)}

	go func() {

		watcher := kapi.Watcher(path, &client.WatcherOptions{Recursive: true})

		for {
			_, err := watcher.Next(context.Background())
			if err != nil {
				log.Printf("watch failed %v, sleeping for 1s\n", err.Error())
				time.Sleep(1 * time.Second)
			} else {
				select {
				case w.ch <- struct{}{}:
				default:
				}
			}
		}
	}()

	return w
}
Example #17
// execWatchCommandFunc executes the "exec-watch" command.
func execWatchCommandFunc(c *cli.Context, ki client.KeysAPI) {
	args := c.Args()
	argslen := len(args)

	if argslen < 2 {
		handleError(ExitBadArgs, errors.New("key and command to exec required"))
	}

	var (
		key     string
		cmdArgs []string
	)

	foundSep := false
	for i := range args {
		if args[i] == "--" && i != 0 {
			foundSep = true
			break
		}
	}

	if foundSep {
		key = args[0]
		cmdArgs = args[2:]
	} else {
		// If no `--` separator is present, the key comes last and
		// everything before it is the command to exec.
		key = args[argslen-1]
		cmdArgs = args[:argslen-1]
	}

	index := 0
	if c.Int("after-index") != 0 {
		index = c.Int("after-index") + 1
	}

	recursive := c.Bool("recursive")

	sigch := make(chan os.Signal, 1)
	signal.Notify(sigch, os.Interrupt)

	go func() {
		<-sigch
		os.Exit(0)
	}()

	w := ki.Watcher(key, &client.WatcherOptions{AfterIndex: uint64(index), Recursive: recursive})

	for {
		resp, err := w.Next(context.TODO())
		if err != nil {
			handleError(ExitServerError, err)
		}
		if resp.Node.Dir {
			fmt.Fprintf(os.Stderr, "Ignored dir %s change", resp.Node.Key)
			continue
		}

		cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
		cmd.Env = environResponse(resp, os.Environ())

		stdout, err := cmd.StdoutPipe()
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			os.Exit(1)
		}
		stderr, err := cmd.StderrPipe()
		if err != nil {
			fmt.Fprintln(os.Stderr, err.Error())
			os.Exit(1)
		}

		go func() {
			err := cmd.Start()
			if err != nil {
				fmt.Fprintln(os.Stderr, err.Error())
				os.Exit(1)
			}
			go io.Copy(os.Stdout, stdout)
			go io.Copy(os.Stderr, stderr)
			cmd.Wait()
		}()
	}
}
Example #18
func main() {
	flag.Parse()

	if config.Version {
		fmt.Printf("skydns server version: %s\n", server.Version)
		os.Exit(0)
	}

	machines := strings.Split(machine, ",")

	var clientptr *etcdv3.Client
	var err error
	var clientv3 etcdv3.Client
	var clientv2 etcd.KeysAPI

	if config.Etcd3 {
		clientptr, err = newEtcdV3Client(machines, tlspem, tlskey, cacert)
		clientv3 = *clientptr
	} else {
		clientv2, err = newEtcdV2Client(machines, tlspem, tlskey, cacert, username, password)
	}

	if err != nil {
		panic(err)
	}

	if nameserver != "" {
		for _, hostPort := range strings.Split(nameserver, ",") {
			if err := validateHostPort(hostPort); err != nil {
				log.Fatalf("skydns: nameserver is invalid: %s", err)
			}
			config.Nameservers = append(config.Nameservers, hostPort)
		}
	}
	if err := validateHostPort(config.DnsAddr); err != nil {
		log.Fatalf("skydns: addr is invalid: %s", err)
	}

	if config.Etcd3 {
		if err := loadEtcdV3Config(clientv3, config); err != nil {
			log.Fatalf("skydns: %s", err)
		}
	} else {
		if err := loadEtcdV2Config(clientv2, config); err != nil {
			log.Fatalf("skydns: %s", err)
		}
	}

	if err := server.SetDefaults(config); err != nil {
		log.Fatalf("skydns: defaults could not be set from /etc/resolv.conf: %v", err)
	}

	if config.Local != "" {
		config.Local = dns.Fqdn(config.Local)
	}

	var backend server.Backend
	if config.Etcd3 {
		backend = backendetcdv3.NewBackendv3(clientv3, ctx, &backendetcdv3.Config{
			Ttl:      config.Ttl,
			Priority: config.Priority,
		})
	} else {
		backend = backendetcd.NewBackend(clientv2, ctx, &backendetcd.Config{
			Ttl:      config.Ttl,
			Priority: config.Priority,
		})
	}

	s := server.New(backend, config)
	if stub {
		s.UpdateStubZones()
		go func() {
			duration := 1 * time.Second

			if config.Etcd3 {
				var watcher etcdv3.WatchChan
				watcher = clientv3.Watch(ctx, msg.Path(config.Domain)+"/dns/stub/", etcdv3.WithPrefix())

				for wresp := range watcher {
					if wresp.Err() != nil {
						log.Printf("skydns: stubzone update failed, sleeping %s + ~3s", duration)
						time.Sleep(duration + (time.Duration(rand.Float32() * 3e9)))
						duration *= 2
						if duration > 32*time.Second {
							duration = 32 * time.Second
						}
					} else {
						s.UpdateStubZones()
						log.Printf("skydns: stubzone update")
						duration = 1 * time.Second // reset
					}
				}
			} else {
				var watcher etcd.Watcher

				watcher = clientv2.Watcher(msg.Path(config.Domain)+"/dns/stub/", &etcd.WatcherOptions{AfterIndex: 0, Recursive: true})

				for {
					_, err := watcher.Next(ctx)

					if err != nil {
						log.Printf("skydns: stubzone update failed, sleeping %s + ~3s", duration)
						time.Sleep(duration + (time.Duration(rand.Float32() * 3e9))) // Add some random.
						duration *= 2
						if duration > 32*time.Second {
							duration = 32 * time.Second
						}
					} else {
						s.UpdateStubZones()
						log.Printf("skydns: stubzone update")
						duration = 1 * time.Second // reset
					}
				}
			}
		}()
	}

	if err := metrics.Metrics(); err != nil {
		log.Fatalf("skydns: %s", err)
	} else {
		log.Printf("skydns: metrics enabled on :%s%s", metrics.Port, metrics.Path)
	}

	if err := s.Run(); err != nil {
		log.Fatalf("skydns: %s", err)
	}
}