func update(watcher client.Watcher, space *Namespace) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() res, err := watcher.Next(ctx) if err != nil { if err != context.DeadlineExceeded { log.Errorf("watcher next failed, err[%v]", err) } return } node := res.Node if node == nil { log.Errorf("node is nil") return } if node.Dir { log.Infof("node is dir") return } service, url, err := parsePath(node.Key, space.name) if err != nil { log.Errorf("parse path failed, err[%v]", err) return } log.Infof("%s %s %s", res.Action, service, url) switch res.Action { case "set", "update": space.updateEndpoint(service, url) case "delete": space.deleteEndpoint(service, url) case "expire": space.expireEndpoint(service, url) } }
func (d *EtcdStateDriver) channelEtcdEvents(watcher client.Watcher, rsps chan [2][]byte) { for { // block on change notifications etcdRsp, err := watcher.Next(context.Background()) if err != nil { log.Errorf("Error %v during watch", err) time.Sleep(time.Second) continue } // XXX: The logic below assumes that the node returned is always a node // of interest. Eg: If we set a watch on /a/b/c, then we are mostly // interested in changes in that directory i.e. changes to /a/b/c/d1..d2 // This works for now as the constructs like network and endpoints that // need to be watched are organized as above. Need to revisit when // this assumption changes. rsp := [2][]byte{nil, nil} eventStr := "create" if etcdRsp.Node.Value != "" { rsp[0] = []byte(etcdRsp.Node.Value) } if etcdRsp.PrevNode != nil && etcdRsp.PrevNode.Value != "" { rsp[1] = []byte(etcdRsp.PrevNode.Value) if etcdRsp.Node.Value != "" { eventStr = "modify" } else { eventStr = "delete" } } log.Debugf("Received %q for key: %s", eventStr, etcdRsp.Node.Key) //channel the translated response rsps <- rsp } }
func watcheWorker(c ctx.Context, watcher etcd.Watcher) <-chan []*Info { v := make(chan []*Info) go func() { evt, err := watcher.Next(c) if err != nil { log.WithFields(log.Fields{"err": err}).Debug("config") retry.Delay() v <- nil } else { retry.Reset() if evt.Node.Dir { log.WithFields(log.Fields{"key": evt.Node.Key}).Warning("not a valid node") v <- nil } else { log.WithFields(log.Fields{"key": evt.Node.Key}).Warning("cfgkey") switch evt.Action { default: v <- nil break case "set": v <- get(evt.Node.Value) break case "del": case "expire": v <- make([]*Info, 0) break } } } }() return v }
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
//
// It streams watch responses into w.etcdIncoming (and errors into w.etcdError)
// until the watch context is canceled or etcd returns an error. Both channels
// are closed on exit so consumers observe termination.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer utilruntime.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)

	// All calls to etcd are coming from this function - once it is finished
	// no other call to etcd should be generated by this watcher.
	done := func() {}

	// We need to be prepared, that Stop() can be called at any time.
	// It can potentially also be called, even before this function is called.
	// If that is the case, we simply skip all the code here.
	// See #18928 for more details.
	var watcher etcd.Watcher
	// The closure runs all initialization under w.stopLock and reports (via
	// its return value) whether this function should bail out immediately.
	returned := func() bool {
		w.stopLock.Lock()
		defer w.stopLock.Unlock()
		if w.stopped {
			// Watcher has already been stopped - don't event initiate it here.
			return true
		}
		// Register with the wait group only after confirming we're not
		// stopped; `done` is swapped so the deferred call below balances it.
		w.wg.Add(1)
		done = w.wg.Done

		// Perform initialization of watcher under lock - we want to avoid situation when
		// Stop() is called in the meantime (which in tests can cause etcd termination and
		// strange behavior here).
		if resourceVersion == 0 {
			// No starting index given: fetch the current state first so the
			// watch can resume from the latest known index.
			latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming)
			if err != nil {
				w.etcdError <- err
				return true
			}
			resourceVersion = latest
		}
		opts := etcd.WatcherOptions{
			Recursive:  w.list,
			AfterIndex: resourceVersion,
		}
		watcher = client.Watcher(key, &opts)
		// w.ctx/w.cancel let Stop() interrupt the blocking Next() below.
		w.ctx, w.cancel = context.WithCancel(ctx)
		return false
	}()
	defer done()
	if returned {
		return
	}

	// Pump watch responses to the consumer until cancellation or error.
	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
func watchMockEtcd(ctx context.Context, watcher etcd.Watcher, result chan error) { type evt struct { key string event string received bool } expected := []evt{ {"/coreos.com/network/foobar/config", "create", false}, {"/coreos.com/network/blah/config", "create", false}, {"/coreos.com/network/blah/config", "update", false}, {"/coreos.com/network/foobar/config", "delete", false}, {"/coreos.com/network/foobar", "delete", false}, } // Wait for delete events on /coreos.com/network/foobar and its // 'config' child, and for the update event on // /coreos.com/network/foobar (for 'config' delete) and on // /coreos.com/network (for 'foobar' delete) numEvents := 0 for { resp, err := watcher.Next(ctx) if err != nil { if err == context.Canceled { break } result <- fmt.Errorf("Unexpected error watching for event: %v", err) break } if resp.Node == nil { result <- fmt.Errorf("Unexpected empty node watching for event") break } found := false for i, e := range expected { if e.key == resp.Node.Key && e.event == resp.Action { if expected[i].received != true { expected[i].received = true found = true numEvents += 1 } break } } if found == false { result <- fmt.Errorf("Received unexpected or already received event %v", resp) break } if numEvents == len(expected) { result <- nil break } } }
// wrap the etcd watcher.Next blocking function inside of a channel
//
// EtcdChannelWatch starts a goroutine that repeatedly calls watcher.Next
// (which blocks until an event or error) and forwards each response/error
// pair on the returned unbuffered channel.
//
// NOTE(review): the goroutine loops forever and the channel send blocks when
// the consumer stops reading, so the goroutine leaks unless the caller keeps
// draining the channel for the lifetime of the process — confirm callers do.
func (etcdO *EtcdWObject) EtcdChannelWatch(watcher etcd.Watcher, context etcd_context.Context) chan *EtcdChannelWatchResponse {
	ch := make(chan *EtcdChannelWatchResponse)
	go func() {
		for {
			resp, err := watcher.Next(context) // blocks here
			ch <- &EtcdChannelWatchResponse{resp, err}
		}
	}()
	return ch
}
func (service *ConsistentService) watch(watcher client.Watcher) { for { resp, err := watcher.Next(context.Background()) if err == nil { if resp.Action == "set" { n := resp.Node.Value service.consis.Add(n) } else if resp.Action == "delete" { n := resp.PrevNode.Value service.consis.Remove(n) } } } }
func watchLoop(w client.Watcher, ctx context.Context, r chan *client.Response) { for { response, err := w.Next(ctx) if err == context.Canceled { close(r) return } if err != nil { panic(err) } r <- response } }
func (w *watcher) run(watch client.Watcher, ctx context.Context, out chan<- bool) { for { _, err := watch.Next(ctx) if err != nil { log.Printf( "Got an error from etcd [%s], closing chan and exiting\n", err, ) close(out) return } out <- true } }
func (service *ConsistentService) watch(watcher client.Watcher) { for { resp, err := watcher.Next(context.Background()) if err == nil { if resp.Action == "set" { n := resp.Node.Value if _, ok := service.nodes[n]; !ok { service.consis.Add(n) service.nodes[n] = true } } else if resp.Action == "delete" { n := resp.PrevNode.Value if _, ok := service.nodes[n]; ok { service.consis.Remove(n) delete(service.nodes, n) } } } } }
func etcdWatch(ctx context.Context, w etcd.Watcher) chan *event.Event { c := make(chan *event.Event) go func() { for { resp, err := w.Next(ctx) // blocks here if err != nil { log.Println(err) } var ret bool switch resp.Node.Value { case "true": ret = true case "false": ret = false default: log.Printf("Unknown value %v, must be ai boolean", resp.Node.Value) } c <- &event.Event{ret, ""} } }() return c }
// main wires up the skydns server: parse flags, build the etcd client, load
// and validate configuration, optionally watch the stub-zone subtree for
// changes, enable metrics, and finally run the DNS server.
func main() {
	flag.Parse()
	machines := strings.Split(machine, ",")
	client, err := newEtcdClient(machines, tlspem, tlskey, cacert)
	if err != nil {
		panic(err)
	}

	// Validate and register any extra upstream nameservers from the CLI.
	if nameserver != "" {
		for _, hostPort := range strings.Split(nameserver, ",") {
			if err := validateHostPort(hostPort); err != nil {
				log.Fatalf("skydns: nameserver is invalid: %s", err)
			}
			config.Nameservers = append(config.Nameservers, hostPort)
		}
	}
	if err := validateHostPort(config.DnsAddr); err != nil {
		log.Fatalf("skydns: addr is invalid: %s", err)
	}
	if err := loadConfig(client, config); err != nil {
		log.Fatalf("skydns: %s", err)
	}
	if err := server.SetDefaults(config); err != nil {
		log.Fatalf("skydns: defaults could not be set from /etc/resolv.conf: %v", err)
	}
	if config.Local != "" {
		config.Local = dns.Fqdn(config.Local)
	}

	backend := backendetcd.NewBackend(client, ctx, &backendetcd.Config{
		Ttl:      config.Ttl,
		Priority: config.Priority,
	})
	s := server.New(backend, config)

	if stub {
		s.UpdateStubZones()
		// Watch the stub-zone subtree in etcd and refresh on every change.
		// On watch errors, back off exponentially (1s doubling to a 32s cap)
		// with up to ~3s of random jitter added to each sleep.
		go func() {
			duration := 1 * time.Second
			var watcher etcd.Watcher
			watcher = client.Watcher(msg.Path(config.Domain)+"/dns/stub/", &etcd.WatcherOptions{AfterIndex: 0, Recursive: true})
			for {
				_, err := watcher.Next(ctx)
				if err != nil {
					// log.Printf("skydns: stubzone update failed, sleeping %s + ~3s", duration)
					time.Sleep(duration + (time.Duration(rand.Float32() * 3e9))) // Add some random.
					duration *= 2
					if duration > 32*time.Second {
						duration = 32 * time.Second
					}
				} else {
					s.UpdateStubZones()
					log.Printf("skydns: stubzone update")
					duration = 1 * time.Second // reset
				}
			}
		}()
	}

	if err := metrics.Metrics(); err != nil {
		log.Fatalf("skydns: %s", err)
	} else {
		log.Printf("skydns: metrics enabled on :%s%s", metrics.Port, metrics.Path)
	}
	// Run blocks for the lifetime of the server.
	if err := s.Run(); err != nil {
		log.Fatalf("skydns: %s", err)
	}
}
// main wires up the skydns server with either an etcd v2 or v3 backend
// (selected by config.Etcd3): parse flags, build the client, load and
// validate configuration, optionally watch the stub-zone subtree, enable
// metrics, and finally run the DNS server.
func main() {
	flag.Parse()
	if config.Version {
		fmt.Printf("skydns server version: %s\n", server.Version)
		os.Exit(0)
	}
	machines := strings.Split(machine, ",")

	// Build the etcd client for whichever API version is configured; only
	// one of clientv3/clientv2 is populated.
	var clientptr *etcdv3.Client
	var err error
	var clientv3 etcdv3.Client
	var clientv2 etcd.KeysAPI
	if config.Etcd3 {
		clientptr, err = newEtcdV3Client(machines, tlspem, tlskey, cacert)
		clientv3 = *clientptr
	} else {
		clientv2, err = newEtcdV2Client(machines, tlspem, tlskey, cacert, username, password)
	}
	if err != nil {
		panic(err)
	}

	// Validate and register any extra upstream nameservers from the CLI.
	if nameserver != "" {
		for _, hostPort := range strings.Split(nameserver, ",") {
			if err := validateHostPort(hostPort); err != nil {
				log.Fatalf("skydns: nameserver is invalid: %s", err)
			}
			config.Nameservers = append(config.Nameservers, hostPort)
		}
	}
	if err := validateHostPort(config.DnsAddr); err != nil {
		log.Fatalf("skydns: addr is invalid: %s", err)
	}
	if config.Etcd3 {
		if err := loadEtcdV3Config(clientv3, config); err != nil {
			log.Fatalf("skydns: %s", err)
		}
	} else {
		if err := loadEtcdV2Config(clientv2, config); err != nil {
			log.Fatalf("skydns: %s", err)
		}
	}
	if err := server.SetDefaults(config); err != nil {
		log.Fatalf("skydns: defaults could not be set from /etc/resolv.conf: %v", err)
	}
	if config.Local != "" {
		config.Local = dns.Fqdn(config.Local)
	}

	// Pick the matching backend implementation for the server.
	var backend server.Backend
	if config.Etcd3 {
		backend = backendetcdv3.NewBackendv3(clientv3, ctx, &backendetcdv3.Config{
			Ttl:      config.Ttl,
			Priority: config.Priority,
		})
	} else {
		backend = backendetcd.NewBackend(clientv2, ctx, &backendetcd.Config{
			Ttl:      config.Ttl,
			Priority: config.Priority,
		})
	}
	s := server.New(backend, config)

	if stub {
		s.UpdateStubZones()
		// Watch the stub-zone subtree and refresh on every change. On watch
		// errors, back off exponentially (1s doubling to a 32s cap) with up
		// to ~3s of random jitter. The v3 path ranges over a watch channel;
		// the v2 path polls Watcher.Next in a loop.
		go func() {
			duration := 1 * time.Second
			if config.Etcd3 {
				var watcher etcdv3.WatchChan
				watcher = clientv3.Watch(ctx, msg.Path(config.Domain)+"/dns/stub/", etcdv3.WithPrefix())
				for wresp := range watcher {
					if wresp.Err() != nil {
						log.Printf("skydns: stubzone update failed, sleeping %s + ~3s", duration)
						time.Sleep(duration + (time.Duration(rand.Float32() * 3e9)))
						duration *= 2
						if duration > 32*time.Second {
							duration = 32 * time.Second
						}
					} else {
						s.UpdateStubZones()
						log.Printf("skydns: stubzone update")
						duration = 1 * time.Second //reset
					}
				}
			} else {
				var watcher etcd.Watcher
				watcher = clientv2.Watcher(msg.Path(config.Domain)+"/dns/stub/", &etcd.WatcherOptions{AfterIndex: 0, Recursive: true})
				for {
					_, err := watcher.Next(ctx)
					if err != nil {
						// log.Printf("skydns: stubzone update failed, sleeping %s + ~3s", duration)
						time.Sleep(duration + (time.Duration(rand.Float32() * 3e9))) // Add some random.
						duration *= 2
						if duration > 32*time.Second {
							duration = 32 * time.Second
						}
					} else {
						s.UpdateStubZones()
						log.Printf("skydns: stubzone update")
						duration = 1 * time.Second // reset
					}
				}
			}
		}()
	}

	if err := metrics.Metrics(); err != nil {
		log.Fatalf("skydns: %s", err)
	} else {
		log.Printf("skydns: metrics enabled on :%s%s", metrics.Port, metrics.Path)
	}
	// Run blocks for the lifetime of the server.
	if err := s.Run(); err != nil {
		log.Fatalf("skydns: %s", err)
	}
}