func createJobInOrder(ctx context.Context, kapi client.KeysAPI, j *job) error {
	// Buffered channels so the goroutine can always complete its send,
	// even if the caller has already returned because ctx was cancelled.
	errChan := make(chan error, 1)
	done := make(chan struct{}, 1)
	go func() {
		buf := new(bytes.Buffer)
		if err := json.NewEncoder(buf).Encode(j); err != nil {
			errChan <- err
			return
		}
		value := buf.String()
		fmt.Println("createJobInOrder:", value)
		opts := &client.CreateInOrderOptions{}
		if _, err := kapi.CreateInOrder(ctx, dirName, value, opts); err != nil {
			errChan <- err
			return
		}
		done <- struct{}{}
	}()
	select {
	case <-done:
		return nil
	case v := <-errChan:
		return v
	case <-ctx.Done():
		return ctx.Err()
	}
}
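// None of the snippets in this collection construct the client.KeysAPI handle they
// accept. The function below is a minimal, hypothetical sketch of building one with
// the etcd v2 client; the endpoint URL and header timeout are assumptions for
// illustration, not values taken from any of these codebases.
func newKeysAPI() (client.KeysAPI, error) {
	cfg := client.Config{
		Endpoints:               []string{"http://127.0.0.1:2379"},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: 5 * time.Second,
	}
	c, err := client.New(cfg)
	if err != nil {
		return nil, err
	}
	return client.NewKeysAPI(c), nil
}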
func sync(kapi client.KeysAPI, syncChannel chan blocker.ControlMsg) {
	for {
		msg := <-syncChannel
		start := time.Now()

		// IPv6 blocks live under a separate directory from IPv4 blocks.
		var folder string
		if msg.Ip.To4() == nil {
			folder = "dblock6/"
		} else {
			folder = "dblock/"
		}

		_, err := kapi.Set(context.Background(), folder+msg.Ip.String(), "0",
			&client.SetOptions{TTL: 60 * time.Second, PrevExist: client.PrevNoExist})
		if err != nil {
			if err.(client.Error).Code == client.ErrorCodeNodeExist {
				log.Print("Block already existed, not adding again")
			} else {
				log.Fatal(err)
			}
		} else {
			// print common key info
			log.Println("[sync]\tAdded block: " + msg.Ip.String())
		}

		elapsed := time.Since(start)
		log.Printf("sync took %s", elapsed)
		sync_timing.Observe(elapsed.Seconds())
	}
}
// Create etcd directory structure from a map, slice or struct.
func Create(kapi client.KeysAPI, path string, val reflect.Value) error {
	switch val.Kind() {
	case reflect.Ptr:
		orig := val.Elem()
		if !orig.IsValid() {
			return nil
		}
		if err := Create(kapi, path, orig); err != nil {
			return err
		}
	case reflect.Interface:
		orig := val.Elem()
		if err := Create(kapi, path, orig); err != nil {
			return err
		}
	case reflect.Struct:
		for i := 0; i < val.NumField(); i++ {
			t := val.Type().Field(i)
			k := t.Tag.Get("etcd")
			if err := Create(kapi, path+"/"+k, val.Field(i)); err != nil {
				return err
			}
		}
	case reflect.Map:
		if strings.HasPrefix(pathx.Base(path), "_") {
			log.Printf("create hidden directory in etcd: %s", path)
		}
		for _, k := range val.MapKeys() {
			v := val.MapIndex(k)
			if err := Create(kapi, path+"/"+k.String(), v); err != nil {
				return err
			}
		}
	case reflect.Slice:
		for i := 0; i < val.Len(); i++ {
			// Propagate errors from slice elements instead of silently dropping them.
			if err := Create(kapi, fmt.Sprintf("%s/%d", path, i), val.Index(i)); err != nil {
				return err
			}
		}
	case reflect.String:
		if strings.HasPrefix(pathx.Base(path), "_") {
			log.Printf("set hidden key in etcd: %s", path)
		}
		_, err := kapi.Set(context.TODO(), path, val.String(), nil)
		if err != nil {
			return err
		}
	case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Float32, reflect.Float64:
		if strings.HasPrefix(pathx.Base(path), "_") {
			log.Printf("set hidden key in etcd: %s", path)
		}
		_, err := kapi.Set(context.TODO(), path, fmt.Sprintf("%v", val.Interface()), nil)
		if err != nil {
			return err
		}
	default:
		return fmt.Errorf("unsupported type: %s for path: %s", val.Kind(), path)
	}
	return nil
}
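// A hypothetical usage sketch for Create above: mirroring a nested map into etcd as a
// directory tree. The path "/example/app" and the map contents are made-up values for
// illustration only.
func createExample(kapi client.KeysAPI) error {
	conf := map[string]interface{}{
		"name":  "web",
		"ports": []interface{}{"80", "443"},
		"labels": map[string]interface{}{
			"env": "staging",
		},
	}
	// Writes /example/app/name, /example/app/ports/0, /example/app/ports/1,
	// and /example/app/labels/env.
	return Create(kapi, "/example/app", reflect.ValueOf(conf))
}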
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer util.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)
	if resourceVersion == 0 {
		latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.etcdIncoming)
		if err != nil {
			w.etcdError <- err
			return
		}
		resourceVersion = latest
	}
	opts := etcd.WatcherOptions{
		Recursive:  w.list,
		AfterIndex: resourceVersion,
	}
	watcher := client.Watcher(key, &opts)
	w.stopLock.Lock()
	w.ctx, w.cancel = context.WithCancel(ctx)
	w.stopLock.Unlock()
	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
// keysToLocal copies SSH host keys from etcd to the local file system.
//
// This only fails if the main key, sshHostKey, cannot be stored or retrieved.
func keysToLocal(c cookoo.Context, k client.KeysAPI, ciphers []string, etcdPath string) error {
	lpath := "/etc/ssh/ssh_host_%s_key"
	privkey := "%s/sshHost%sKey"
	for _, cipher := range ciphers {
		path := fmt.Sprintf(lpath, cipher)
		key := fmt.Sprintf(privkey, etcdPath, cipher)
		res, err := k.Get(dctx(), key, &client.GetOptions{})
		if err != nil || res.Node == nil {
			continue
		}
		content := res.Node.Value
		if err := ioutil.WriteFile(path, []byte(content), 0600); err != nil {
			log.Errf(c, "Error writing ssh host key file: %s", err)
		}
	}

	// Now get generic key.
	res, err := k.Get(dctx(), "sshHostKey", &client.GetOptions{})
	if err != nil || res.Node == nil {
		return fmt.Errorf("Failed to get sshHostKey from etcd. %v", err)
	}
	content := res.Node.Value
	if err := ioutil.WriteFile("/etc/ssh/ssh_host_key", []byte(content), 0600); err != nil {
		log.Errf(c, "Error writing ssh host key file: %s", err)
		return err
	}
	return nil
}
func cleanBackends(kapi client.KeysAPI) {
	resp, err := kapi.Get(context.Background(), "/vulcand/backends/", &client.GetOptions{Recursive: true})
	if err != nil {
		if e, _ := err.(client.Error); e.Code == etcderr.EcodeKeyNotFound {
			return
		}
		panic(err)
	}
	if !resp.Node.Dir {
		log.Println("/vulcand/backends is not a directory.")
		return
	}
	for _, be := range resp.Node.Nodes {
		beHasContent := false
		if be.Dir {
			for _, child := range be.Nodes {
				// Anything apart from an empty "servers" dir means this backend is still needed.
				if filepath.Base(child.Key) != "servers" || len(child.Nodes) > 0 {
					beHasContent = true
					break
				}
			}
		}
		if !beHasContent {
			_, err := kapi.Delete(context.Background(), be.Key, &client.DeleteOptions{Recursive: true})
			if err != nil {
				log.Printf("failed to remove unwanted backend %v\n", be.Key)
			}
		}
	}
}
func registerWithEtcd(api client.KeysAPI, c *cli.Context) error {
	_, err := api.Create(context.Background(), c.String("key-register"), "complete")
	if err != nil {
		return err
	}
	return nil
}
func mustGetServices(ctx context.Context, client etcd.KeysAPI, basepath *string) services {
	resp, err := client.Get(ctx, *basepath, &etcd.GetOptions{Recursive: true})
	if err != nil {
		log.WithFields(log.Fields{
			"error":    err,
			"basepath": *basepath,
		}).Fatal("unable to get service definitions from etcd")
	}

	var svcs services
	for _, node := range resp.Node.Nodes {
		s, err := newService(node.Key, []byte(node.Value))
		if err != nil {
			log.WithFields(log.Fields{
				"error":    err,
				"basepath": *basepath,
				"key":      node.Key,
			}).Warn("invalid service definition. skipping.")
		} else {
			svcs = append(svcs, s)
		}
	}
	return svcs
}
func NewPubSubTopicByKey(ctx context.Context, keyid string, ttl time.Duration, kapi etcdc.KeysAPI) (*EtcdPubSubTopic, error) {
	_, err := kapi.Get(ctx, keyid, nil)
	if IsKeyNotFound(err) {
		// Create the topic directory if it does not exist yet; a concurrent creator
		// winning the race is tolerated.
		opt := &etcdc.SetOptions{PrevExist: etcdc.PrevNoExist, TTL: ttl, Dir: true}
		_, err = kapi.Set(ctx, keyid, "", opt)
		if err != nil && !IsCompareAndSwapFailure(err) && !IsNodeExists(err) {
			return nil, err
		}
	} else if err != nil {
		return nil, err
	}
	keepalive, err := NewNodeKeepAlive(keyid, ttl, kapi)
	if err != nil {
		return nil, err
	}
	dlog("signal: new signal(pub/sub) for %v, ttl:%v", keyid, ttl)
	return &EtcdPubSubTopic{
		ctx:       ctx,
		keyid:     keyid,
		kapi:      kapi,
		ttl:       ttl,
		keepalive: keepalive,
		stop:      make(chan bool),
	}, nil
}
func mustCreateServiceDirectory(ctx context.Context, kApi etcd.KeysAPI, basepath string) {
	myContext, myCancel := context.WithTimeout(ctx, DefaultTimeout)
	defer myCancel()
	_, err := kApi.Set(myContext, basepath, "", &etcd.SetOptions{Dir: true, PrevExist: etcd.PrevNoExist})
	if err != nil {
		// Only warn on failure; an already-existing directory also lands here
		// because of PrevNoExist.
		log.WithField("error", err).Warn("error creating servicedef directory")
	}
}
// mkCommandFunc executes the "mk" command.
func mkCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	value, err := argOrStdin(c.Args(), os.Stdin, 1)
	if err != nil {
		handleError(ExitBadArgs, errors.New("value required"))
	}

	ttl := c.Int("ttl")

	ctx, cancel := contextWithTotalTimeout(c)
	// Since PrevNoExist means that the Node must not exist previously,
	// this Set method always creates a new key. Therefore, mk command
	// succeeds only if the key did not previously exist, and the command
	// prevents one from overwriting values accidentally.
	resp, err := ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevNoExist})
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}

	printResponseKey(resp, c.GlobalString("output"))
}
// mkCommandFunc executes the "mk" command.
func mkCommandFunc(c *cli.Context, ki client.KeysAPI) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}
	key := c.Args()[0]
	value, err := argOrStdin(c.Args(), os.Stdin, 1)
	if err != nil {
		handleError(ExitBadArgs, errors.New("value required"))
	}

	ttl := c.Int("ttl")
	inorder := c.Bool("in-order")

	var resp *client.Response
	ctx, cancel := contextWithTotalTimeout(c)
	if !inorder {
		// Since PrevNoExist means that the Node must not exist previously,
		// this Set method always creates a new key. Therefore, mk command
		// succeeds only if the key did not previously exist, and the command
		// prevents one from overwriting values accidentally.
		resp, err = ki.Set(ctx, key, value, &client.SetOptions{TTL: time.Duration(ttl) * time.Second, PrevExist: client.PrevNoExist})
	} else {
		// If the in-order flag is specified, then create an in-order key under
		// the directory identified by the key argument.
		resp, err = ki.CreateInOrder(ctx, key, value, &client.CreateInOrderOptions{TTL: time.Duration(ttl) * time.Second})
	}
	cancel()
	if err != nil {
		handleError(ExitServerError, err)
	}

	printResponseKey(resp, c.GlobalString("output"))
}
func watch(kAPI etcd.KeysAPI, key string, stop chan struct{}) (res *etcd.Response) {
	for res == nil {
		select {
		case <-stop:
			log.Debugf("Gracefully closing etcd watch loop: key=%s", key)
			return
		default:
			opts := &etcd.WatcherOptions{
				AfterIndex: 0,
				Recursive:  true,
			}
			watcher := kAPI.Watcher(key, opts)
			log.Debugf("Creating etcd watcher: %s", key)

			var err error
			res, err = watcher.Next(context.Background())
			if err != nil {
				log.Errorf("etcd watcher %v returned error: %v", key, err)
			}
		}

		// Let's not slam the etcd server in the event that we know
		// an unexpected error occurred.
		time.Sleep(time.Second)
	}
	return
}
func doServerWatch(kapi client.KeysAPI) {
	watcher := kapi.Watcher(runningbase, &client.WatcherOptions{Recursive: true})
	for {
		resp, err := watcher.Next(context.TODO())
		if err != nil {
			if _, ok := err.(*client.ClusterError); ok {
				continue
			}
			log.Fatal(err)
		}
		fmt.Println(resp.Node.Key + " " + resp.Node.Value)
		_, server := path.Split(resp.Node.Key)
		switch resp.Action {
		case "create":
			fmt.Println(server + " has started heart beat")
		case "compareAndSwap":
			fmt.Println(server + " heart beat")
		case "compareAndDelete":
			fmt.Println(server + " has shut down correctly")
		case "expire":
			fmt.Println("*** " + server + " has missed heartbeat")
		default:
			fmt.Println("Didn't handle " + resp.Action)
		}
	}
}
func watchKey(key string, blockControlChan chan blocker.ControlMsg, kapi client.KeysAPI) {
	watcher := kapi.Watcher(key, &client.WatcherOptions{Recursive: true})
	for {
		response, err := watcher.Next(context.Background())
		if err != nil {
			// Avoid dereferencing a nil response when the watch fails.
			log.Println("[sync]\tetcd watch error: " + err.Error())
			continue
		}
		switch response.Action {
		case "create":
			// counter: sync.set
			handleKey(*response.Node, blockControlChan, true)
			log.Println("[sync]\tetcd: create: " + response.Node.Key)
		case "set":
			// counter: sync.set
			handleKey(*response.Node, blockControlChan, true)
			log.Println("[sync]\tetcd: set: " + response.Node.Key)
		case "delete":
			// counter: sync.delete
			handleKey(*response.Node, blockControlChan, false)
			log.Println("[sync]\tetcd: delete: " + response.Node.Key)
		case "expire":
			// counter: sync.expire
			handleKey(*response.Node, blockControlChan, false)
			log.Println("[sync]\tetcd: expired: " + response.Node.Key)
		}
	}
}
// non-blocking
func mustWatchServiceDefs(ctx context.Context, client etcd.KeysAPI, basepath *string, changed chan<- bool) {
	wOpts := &etcd.WatcherOptions{Recursive: true}
	watcher := client.Watcher(*basepath, wOpts)

	watchOperation := func() error {
		resp, err := watcher.Next(ctx)
		if err != nil {
			switch v := err.(type) {
			case etcd.Error:
				if v.Code == etcd.ErrorCodeEventIndexCleared {
					// The index we were watching from has been compacted away;
					// recreate the watcher from the current state.
					watcher = client.Watcher(*basepath, wOpts)
					log.WithFields(log.Fields{
						"basepath": *basepath,
						"code":     v.Code,
						"cause":    v.Cause,
						"index":    v.Index,
						"message":  v.Message,
					}).Warn("refreshed watcher")
					return nil
				}
				// Other etcd errors must not fall through to the nil resp below.
				return err
			default:
				if err.Error() == "unexpected end of JSON input" {
					log.WithField("error", err).Warn("probably a connection timeout. are we in etcd 0.4.x?")
					return nil
				} else {
					return err
				}
			}
		}

		if resp.Action != "get" {
			changed <- true
		}
		return nil
	}

	notify := func(err error, dur time.Duration) {
		log.WithFields(log.Fields{
			"dur":          dur,
			"error":        err,
			"service_path": *basepath,
		}).Error("service definition watch failed. backing off.")
	}

	go func() {
		for {
			err := backoff.RetryNotify(watchOperation, backoff.NewExponentialBackOff(), notify)
			if err != nil {
				log.WithFields(log.Fields{
					"error":        err,
					"service_path": *basepath,
				}).Fatal("unable to recover communication with etcd, watch abandoned")
			}
		}
	}()
}
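// A hypothetical caller sketch for mustWatchServiceDefs above: the watch goroutine
// pushes onto `changed`, and the consumer reloads service definitions whenever it
// fires. The reload callback and base path are assumptions for illustration only.
func runServiceWatch(ctx context.Context, kapi etcd.KeysAPI, basepath string, reload func()) {
	changed := make(chan bool)
	mustWatchServiceDefs(ctx, kapi, &basepath, changed)
	go func() {
		for range changed {
			reload()
		}
	}()
}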
func setValues(kapi client.KeysAPI, kvs map[string]string) error {
	for k, v := range kvs {
		if _, err := kapi.Set(context.Background(), k, v, &client.SetOptions{}); err != nil {
			return err
		}
	}
	return nil
}
// ClientGet retrieves the given etcd key (passed as url) and returns the resulting
// client.Response; it exits the process on any error.
func ClientGet(kapi client.KeysAPI, url string) *client.Response {
	resp, err := kapi.Get(context.Background(), url, &clientGetOpts)
	if err != nil {
		logr.LogLine(logr.Lfatal, ltagsrc, err.Error())
		os.Exit(2)
	}
	return resp
}
func generateConfig(kapi client.KeysAPI) (*client.Response, error) {
	resp, err := kapi.Get(context.Background(), *etcdPrefix, &client.GetOptions{Recursive: true})
	if err != nil {
		return resp, err
	}
	traverseConfigDirectory(resp.Node)
	return resp, nil
}
func importFunc(dir string, file string, f iodatafmt.DataFmt, replace bool, yes bool, e Etcdtool, c *cli.Context, ki client.KeysAPI) {
	// Check if dir exists and is a directory.
	exists, err := dirExists(dir, c, ki)
	if err != nil {
		fatalf("Failed to check if dir exists: %s: %s", dir, err.Error())
	}

	if exists {
		isdir, err := isDir(dir, c, ki)
		if err != nil {
			fatal(err.Error())
		}
		if !isdir {
			fatalf("Specified dir is not a directory: %s", dir)
		}
	}

	// Load file.
	m, err := iodatafmt.Load(file, f)
	if err != nil {
		fatal(err.Error())
	}

	// Validate data.
	if c.Bool("validate") {
		validateFunc(e, dir, m)
	}

	if exists {
		if replace {
			if !askYesNo(fmt.Sprintf("Do you want to overwrite data in directory: %s", dir)) {
				os.Exit(1)
			}

			// Delete dir.
			if _, err = ki.Delete(context.TODO(), dir, &client.DeleteOptions{Recursive: true}); err != nil {
				fatal(err.Error())
			}
		} else {
			if !yes {
				if !askYesNo(fmt.Sprintf("Do you want to overwrite data in directory: %s", dir)) {
					os.Exit(1)
				}
			}
		}
	} else {
		// Create dir.
		if _, err := ki.Set(context.TODO(), dir, "", &client.SetOptions{Dir: true}); err != nil {
			fatal(err.Error())
		}
	}

	// Import data.
	if err = etcdmap.Create(ki, dir, reflect.ValueOf(m)); err != nil {
		fatal(err.Error())
	}
}
func deleteRecursiveIfExists(kapi client.KeysAPI, path string) (err error) {
	_, err = kapi.Delete(context.Background(), path, &client.DeleteOptions{Recursive: true})
	if e, _ := err.(client.Error); e.Code == etcderr.EcodeKeyNotFound {
		// Ignore not found.
		err = nil
	}
	return err
}
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer utilruntime.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)

	// All calls to etcd are coming from this function - once it is finished
	// no other call to etcd should be generated by this watcher.
	done := func() {}

	// We need to be prepared, that Stop() can be called at any time.
	// It can potentially also be called, even before this function is called.
	// If that is the case, we simply skip all the code here.
	// See #18928 for more details.
	var watcher etcd.Watcher
	returned := func() bool {
		w.stopLock.Lock()
		defer w.stopLock.Unlock()
		if w.stopped {
			// Watcher has already been stopped - don't even initiate it here.
			return true
		}
		w.wg.Add(1)
		done = w.wg.Done

		// Perform initialization of watcher under lock - we want to avoid situation when
		// Stop() is called in the meantime (which in tests can cause etcd termination and
		// strange behavior here).
		if resourceVersion == 0 {
			latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming)
			if err != nil {
				w.etcdError <- err
				return true
			}
			resourceVersion = latest
		}

		opts := etcd.WatcherOptions{
			Recursive:  w.list,
			AfterIndex: resourceVersion,
		}
		watcher = client.Watcher(key, &opts)
		w.ctx, w.cancel = context.WithCancel(ctx)
		return false
	}()
	defer done()
	if returned {
		return
	}

	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
func setHostPort(k client.KeysAPI, base, host, port string, ttl time.Duration) error {
	o := client.SetOptions{TTL: ttl}
	if _, err := k.Set(dctx(), base+"/host", host, &o); err != nil {
		return err
	}
	if _, err := k.Set(dctx(), base+"/port", port, &o); err != nil {
		return err
	}
	return nil
}
func doConfig(kapi client.KeysAPI) {
	var key = configbase + *configserver + "/" + *configvar
	resp, err := kapi.Set(context.TODO(), key, *configval, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Action + " " + resp.Node.Key + " to " + resp.Node.Value)
}
func generateConfigWatcher(kapi client.KeysAPI, resp *client.Response) (*client.Response, error) {
	watcher := kapi.Watcher(*etcdPrefix, &client.WatcherOptions{Recursive: true, AfterIndex: resp.Index})
	ctx := context.Background()
	resp, err := watcher.Next(ctx)
	if err != nil {
		return resp, err
	}
	return generateConfig(kapi)
}
func isDir(dir string, c *cli.Context, ki client.KeysAPI) (bool, error) {
	ctx, cancel := contextWithCommandTimeout(c)
	resp, err := ki.Get(ctx, dir, &client.GetOptions{})
	cancel()
	if err != nil {
		return false, err
	}
	return resp.Node.Dir, nil
}
func dirExists(dir string, c *cli.Context, ki client.KeysAPI) (bool, error) {
	ctx, cancel := contextWithCommandTimeout(c)
	_, err := ki.Get(ctx, dir, &client.GetOptions{})
	cancel()
	if err != nil {
		// A "key not found" error just means the directory does not exist.
		if cerr, ok := err.(client.Error); ok && cerr.Code == client.ErrorCodeKeyNotFound {
			return false, nil
		}
		return false, err
	}
	return true, nil
}
func doDumpQueue(kapi client.KeysAPI) {
	var key = queuebase + *dumpqueuename
	resp, err := kapi.Get(context.TODO(), key, &client.GetOptions{Sort: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range resp.Node.Nodes {
		fmt.Println(v.Key + " set to " + v.Value)
	}
}
func readServices(kapi client.KeysAPI) []Service {
	resp, err := kapi.Get(context.Background(), "/ft/services/", &client.GetOptions{Recursive: true})
	if err != nil {
		log.Println("error reading etcd keys")
		if e, _ := err.(client.Error); e.Code == etcderr.EcodeKeyNotFound {
			log.Println("core key not found")
			return []Service{}
		}
		log.Panicf("failed to read from etcd: %v\n", err.Error())
	}
	if !resp.Node.Dir {
		log.Panicf("%v is not a directory", resp.Node.Key)
	}

	var services []Service
	for _, node := range resp.Node.Nodes {
		if !node.Dir {
			log.Printf("skipping non-directory %v\n", node.Key)
			continue
		}

		service := Service{
			Name:         filepath.Base(node.Key),
			Addresses:    make(map[string]string),
			PathPrefixes: make(map[string]string),
			PathHosts:    make(map[string]string),
		}

		for _, child := range node.Nodes {
			switch filepath.Base(child.Key) {
			case "healthcheck":
				service.HasHealthCheck = child.Value == "true"
			case "servers":
				for _, server := range child.Nodes {
					service.Addresses[filepath.Base(server.Key)] = server.Value
				}
			case "path-regex":
				for _, path := range child.Nodes {
					service.PathPrefixes[filepath.Base(path.Key)] = path.Value
				}
			case "path-host":
				for _, path := range child.Nodes {
					service.PathHosts[filepath.Base(path.Key)] = path.Value
				}
			case "failover-predicate":
				service.FailoverPredicate = child.Value
			default:
				fmt.Printf("skipped key %v for node %v\n", child.Key, child)
			}
		}

		services = append(services, service)
	}
	return services
}
// mkdirCommandFunc executes the "mkdir" command.
func mkdirCommandFunc(c *cli.Context, ki client.KeysAPI, prevExist client.PrevExistType) {
	if len(c.Args()) == 0 {
		handleError(ExitBadArgs, errors.New("key required"))
	}

	key := c.Args()[0]
	ttl := c.Int("ttl")

	_, err := ki.Set(context.TODO(), key, "", &client.SetOptions{TTL: time.Duration(ttl) * time.Second, Dir: true, PrevExist: prevExist})
	if err != nil {
		handleError(ExitServerError, err)
	}
}