// acquireOrRenewLease either races to acquire a new master lease, or updates
// the existing master's lease.
// It returns true if we hold the lease, and an error if one occurs.
// TODO: use the master election utility once it is merged in.
func (c *Config) acquireOrRenewLease(etcdClient *etcd.Client) (bool, error) {
	result, err := etcdClient.Get(c.key, false, false)
	if err != nil {
		if tools.IsEtcdNotFound(err) {
			// There is no current master; try to become master. Create will fail if the key already exists.
			_, err := etcdClient.Create(c.key, c.whoami, c.ttl)
			if err != nil {
				return false, err
			}
			c.lastLease = time.Now()
			return true, nil
		}
		return false, err
	}
	if result.Node.Value == c.whoami {
		glog.Infof("key already exists, we are the master (%s)", result.Node.Value)
		// We extend our lease at 1/2 of the existing TTL; this ensures the master doesn't flap around.
		if result.Node.Expiration.Sub(time.Now()) < time.Duration(c.ttl/2)*time.Second {
			_, err := etcdClient.CompareAndSwap(c.key, c.whoami, c.ttl, c.whoami, result.Node.ModifiedIndex)
			if err != nil {
				return false, err
			}
		}
		c.lastLease = time.Now()
		return true, nil
	}
	glog.Infof("key already exists, the master is %s, sleeping.", result.Node.Value)
	return false, nil
}
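// A minimal sketch (not from the original source) of how a caller might use
// acquireOrRenewLease: re-check the lease on a fixed interval and only act as
// master while the call reports success. The sleep interval and the notion of
// "master-only work" are illustrative assumptions.
func (c *Config) leaseLoop(etcdClient *etcd.Client, stop chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
		}
		isMaster, err := c.acquireOrRenewLease(etcdClient)
		if err != nil {
			glog.Errorf("lease acquisition failed: %v", err)
		}
		if isMaster {
			// Perform master-only work here.
		}
		time.Sleep(time.Duration(c.ttl/4) * time.Second)
	}
}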
func GetIndex(client *etcd.Client, basedir string, jobname string, ip string) (int, error) {
	jobdir := "/jobs" + "/" + jobname
	response, err := client.Get(jobdir, true, true)
	if err == nil {
		for i := 0; i < response.Node.Nodes.Len(); i++ {
			if response.Node.Nodes[i].Value == ip {
				return i, nil
			}
		}
	}
	response, err = client.AddChild(jobdir, ip, 0)
	if err != nil {
		fmt.Printf("use etcd to get index error: %v\n", err)
		return 0, err
	}
	mykey := response.Node.Key
	response, err = client.Get(jobdir, true, true)
	if err != nil {
		fmt.Printf("get etcd jobdir error: %v\n", err)
		return 0, err
	}
	for i := 0; i < response.Node.Nodes.Len(); i++ {
		if response.Node.Nodes[i].Key == mykey {
			return i, nil
		}
	}
	// This line should never be reached.
	return 0, errors.New("etcd add child error")
}
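// A hypothetical usage sketch (not part of the original snippet): each worker
// registers its IP under the job directory and uses the resulting position as
// a stable index. The etcd endpoint, job name, and IP are illustrative assumptions.
func exampleGetIndex() {
	client := etcd.NewClient([]string{"http://127.0.0.1:4001"})
	index, err := GetIndex(client, "/jobs", "training", "10.0.0.12")
	if err != nil {
		fmt.Printf("could not determine worker index: %v\n", err)
		return
	}
	fmt.Printf("this worker's index is %d\n", index)
}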
// GetAddress returns the host:port address of the service taking care of
// the task that we want to talk to.
// Currently we grab the information from etcd every time; a local cache could be used.
// If it fails, e.g. due to a network failure, it returns an error.
func GetAddress(client *etcd.Client, name string, id uint64) (string, error) {
	resp, err := client.Get(TaskMasterPath(name, id), false, false)
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}
// latestRunningVersion retrieves the highest version of the application published
// to etcd. If no app has been published, returns 0.
func latestRunningVersion(client *etcd.Client, appName string) int {
	r := regexp.MustCompile(appNameRegex)
	if client == nil {
		// FIXME: client should only be nil during tests. This should be properly refactored.
		if appName == "ceci-nest-pas-une-app" {
			return 3
		}
		return 0
	}
	resp, err := client.Get(fmt.Sprintf("/deis/services/%s", appName), false, true)
	if err != nil {
		// No app has been published here (key not found) or there was an error.
		return 0
	}
	var versions []int
	for _, node := range resp.Node.Nodes {
		match := r.FindStringSubmatch(node.Key)
		// Account for keys that may not be an application container.
		if match == nil {
			continue
		}
		version, err := strconv.Atoi(match[2])
		if err != nil {
			log.Println(err)
			return 0
		}
		versions = append(versions, version)
	}
	return max(versions)
}
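// The max helper referenced above is not shown in this snippet; a minimal
// sketch under the assumption that it returns 0 for an empty slice, which
// matches the documented "no app published" behaviour.
func max(versions []int) int {
	m := 0
	for _, v := range versions {
		if v > m {
			m = v
		}
	}
	return m
}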
func GetAndWatchEpoch(client *etcd.Client, appname string, epochC chan uint64, stop chan bool) (uint64, error) {
	resp, err := client.Get(EpochPath(appname), false, false)
	if err != nil {
		log.Fatal("etcdutil: can not get epoch from etcd")
	}
	ep, err := strconv.ParseUint(resp.Node.Value, 10, 64)
	if err != nil {
		return 0, err
	}
	receiver := make(chan *etcd.Response, 1)
	go client.Watch(EpochPath(appname), resp.EtcdIndex+1, false, receiver, stop)
	go func() {
		for resp := range receiver {
			if resp.Action != "compareAndSwap" && resp.Action != "set" {
				continue
			}
			epoch, err := strconv.ParseUint(resp.Node.Value, 10, 64)
			if err != nil {
				log.Fatal("etcdutil: can't parse epoch from etcd")
			}
			epochC <- epoch
		}
	}()
	return ep, nil
}
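// A hypothetical caller (not part of the original snippet) showing how the
// returned initial epoch and the update channel might be consumed together.
// The application name, channel buffer size, and stop handling are
// illustrative assumptions.
func watchEpochExample(client *etcd.Client, stop chan bool) error {
	epochC := make(chan uint64, 1)
	current, err := GetAndWatchEpoch(client, "myapp", epochC, stop)
	if err != nil {
		return err
	}
	log.Printf("starting at epoch %d", current)
	// Consume updates until the watch is stopped elsewhere.
	for epoch := range epochC {
		log.Printf("epoch advanced to %d", epoch)
	}
	return nil
}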
func getRuntimeConfFromEtcd(client *etcd.Client, etcd_path string) (*RuntimeConfig, error) {
	resp, err := client.Get(etcd_path, false, false)
	if err != nil {
		return nil, fmt.Errorf("failed to get RuntimeConfig: %v", err)
	}
	r := bytes.NewReader([]byte(resp.Node.Value))
	return ReadRuntimeConfig(r)
}
// On any change we just refresh everything.
func handleChange(etcdClient *etcd.Client) {
	resp, err := etcdClient.Get(etcdWatchKey, false, true)
	if err != nil {
		fmt.Println(err)
		return
	}

	// Go and fetch all the services in Redis.
	redisClient := clients.RedisClient()
	if redisClient == nil {
		fmt.Println("Couldn't connect to redis")
		return
	}
	defer redisClient.Close()

	keys, err := redis.Strings(redisClient.Do("KEYS", "frontend:*"))
	if err != nil {
		fmt.Println(err)
		return
	}

	// Now we delete everything in Redis and add everything back in from etcd.
	redisClient.Send("MULTI")
	if len(keys) > 0 {
		redisClient.Send("DEL", redis.Args{}.AddFlat(keys)...)
	}
	for _, node := range resp.Node.Nodes {
		// This first level is a frontend.
		split := strings.Split(node.Key, "/")
		domain := split[len(split)-1]
		frontendKey := "frontend:" + domain

		// Belt and braces: delete this key just in case it's not already gone (but it should be).
		redisClient.Send("DEL", frontendKey)
		redisClient.Send("RPUSH", frontendKey, domain)

		for _, instNode := range node.Nodes {
			instsplit := strings.Split(instNode.Key, "/")
			inst := instsplit[len(instsplit)-1]
			host := "http://" + inst
			redisClient.Send("RPUSH", frontendKey, host)
		}
	}

	// Should be everything.
	_, err = redisClient.Do("EXEC")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Resynced hipache from etcd")
}
func getEtcdRoute(client *etcd.Client) *Route {
	hostResp, err := client.Get("/deis/logs/host", false, false)
	assert(err, "url")
	portResp, err := client.Get("/deis/logs/port", false, false)
	assert(err, "url")
	protocol := getEtcdValueOrDefault(client, "/deis/logs/protocol", "udp")
	host := fmt.Sprintf("%s:%s", hostResp.Node.Value, portResp.Node.Value)
	log.Printf("routing all to %s://%s", protocol, host)
	return &Route{ID: "etcd", Target: Target{Type: "syslog", Addr: host, Protocol: protocol}}
}
func getEtcdValueOrDefault(c *etcd.Client, key string, defaultValue string) string {
	resp, err := c.Get(key, false, false)
	if err != nil {
		if strings.Contains(fmt.Sprintf("%v", err), "Key not found") {
			return defaultValue
		}
		assert(err, "url")
	}
	return resp.Node.Value
}
func getCurrentVersion(client *etcd.Client, key string) *etcd.Response {
	for {
		if resp, err := client.Get(key, false, false); err == nil {
			return resp
		}
		// Failed to fetch the value; retry after a short pause.
		time.Sleep(time.Second)
	}
}
func get(e *etcd.Client, q string, t uint16) ([]dns.RR, error) {
	path := questionToPath(q, t)
	r, err := e.Get(path, false, false)
	if err != nil {
		return nil, err
	}
	h := dns.RR_Header{Name: q, Rrtype: t, Class: dns.ClassINET, Ttl: 60} // Ttl is overridden
	rr := parseValue(t, r.Node.Value, h)
	return []dns.RR{rr}, nil
}
func loadConfig(client *etcd.Client, config *server.Config) error {
	// Override what isn't set yet from the command line.
	n, err := client.Get("/"+msg.PathPrefix+"/config", false, false)
	if err != nil {
		log.Printf("skydns: falling back to default configuration, could not read from etcd: %s", err)
		return nil
	}
	if err := json.Unmarshal([]byte(n.Node.Value), config); err != nil {
		return fmt.Errorf("failed to unmarshal config: %s", err.Error())
	}
	return nil
}
// WaitFreeTask blocks until it gets a hint of a free task.
func WaitFreeTask(client *etcd.Client, name string, logger *log.Logger) (uint64, error) {
	slots, err := client.Get(FreeTaskDir(name), false, true)
	if err != nil {
		return 0, err
	}
	if total := len(slots.Node.Nodes); total > 0 {
		ri := rand.Intn(total)
		s := slots.Node.Nodes[ri]
		idStr := path.Base(s.Key)
		id, err := strconv.ParseUint(idStr, 0, 64)
		if err != nil {
			return 0, err
		}
		logger.Printf("got free task %v, randomly choose %d to try...", ListKeys(slots.Node.Nodes), ri)
		return id, nil
	}

	watchIndex := slots.EtcdIndex + 1
	respChan := make(chan *etcd.Response, 1)
	go func() {
		for {
			logger.Printf("start to wait for a free task at index %d", watchIndex)
			resp, err := client.Watch(FreeTaskDir(name), watchIndex, true, nil, nil)
			if err != nil {
				logger.Printf("WARN: WaitFreeTask watch failed: %v", err)
				return
			}
			if resp.Action == "set" {
				respChan <- resp
				return
			}
			watchIndex = resp.EtcdIndex + 1
		}
	}()

	var resp *etcd.Response
	var waitTime uint64 = 0
	for {
		select {
		case resp = <-respChan:
			idStr := path.Base(resp.Node.Key)
			id, err := strconv.ParseUint(idStr, 10, 64)
			if err != nil {
				return 0, err
			}
			return id, nil
		case <-time.After(10 * time.Second):
			waitTime++
			logger.Printf("still waiting for a free task after %d0s", waitTime)
		}
	}
}
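// A hypothetical worker loop (not from the original source) showing how
// WaitFreeTask might be used: wait for a free task id, then hand it off to
// application-specific claim/run logic. That step is left as a placeholder here.
func workerLoop(client *etcd.Client, name string, logger *log.Logger) {
	for {
		id, err := WaitFreeTask(client, name, logger)
		if err != nil {
			logger.Printf("WaitFreeTask failed: %v", err)
			time.Sleep(time.Second)
			continue
		}
		// The claim/run step is application-specific and omitted in this sketch.
		logger.Printf("would now try to claim and run task %d", id)
	}
}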
// TestEtcdClient verifies a client is functional. It will attempt to
// connect to the etcd server and block until the server responds at least once, or return an
// error if the server never responded.
func TestEtcdClient(etcdClient *etcdclient.Client) error {
	for i := 0; ; i++ {
		_, err := etcdClient.Get("/", false, false)
		if err == nil || etcdutil.IsEtcdNotFound(err) {
			break
		}
		if i > 100 {
			return fmt.Errorf("could not reach etcd: %v", err)
		}
		time.Sleep(50 * time.Millisecond)
	}
	return nil
}
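// An illustrative startup check (an assumption, not part of the original code)
// using TestEtcdClient to block until etcd is reachable before continuing.
func mustConnectEtcd(servers []string) *etcdclient.Client {
	client := etcdclient.NewClient(servers)
	if err := TestEtcdClient(client); err != nil {
		log.Fatalf("etcd is not reachable: %v", err)
	}
	return client
}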
func loop(channel chan *store.Response, etcdClient *etcd.Client, nodename string) {
	for {
		responses, err := etcdClient.Get(nodename)
		if err != nil {
			// TODO: log the error.
		} else {
			for _, response := range responses {
				channel <- response
			}
		}
		time.Sleep(time.Second * 2)
	}
}
// Demarshal takes an etcd client and an anonymous interface to
// seed with values from etcd. It returns an error if there is an exceptional
// error in the etcd client or if it is invoked incorrectly with maps.
// Any missing keys in etcd will be filled in with blank strings.
func Demarshal(etcd *etcd.Client, target interface{}) (err error) {
	val := reflect.ValueOf(target).Elem()
	for i := 0; i < val.NumField(); i++ {
		valueField := val.Field(i)
		typeField := val.Type().Field(i)
		tag := typeField.Tag
		if tag.Get("etcd") == "" {
			continue
		}

		switch valueField.Kind() {
		case reflect.Bool:
			// The key's presence is the value: present -> true, missing -> false.
			if _, notok := etcd.Get(tag.Get("etcd"), false, false); notok == nil {
				valueField.SetBool(true)
			} else {
				valueField.SetBool(false)
			}
		case reflect.String:
			etcdval, err := etcd.Get(tag.Get("etcd"), false, false)
			if err != nil {
				// Missing key: fill in a blank string.
				valueField.SetString("")
				continue
			}
			valueField.Set(reflect.ValueOf(etcdval.Node.Value))
		case reflect.Map:
			keyKind := typeField.Type.Key().Kind()
			if keyKind != reflect.String {
				return errors.New("Map must be string[string]")
			}
			resp, err := etcd.Get(tag.Get("etcd"), true, true)
			if err != nil {
				return err
			}
			if !resp.Node.Dir {
				return errors.New("maps must be pointed at an etcd directory")
			}
			SetMapOffDir(resp.Node, &valueField, "")
		}
	}
	return
}
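// A minimal, hypothetical usage sketch for Demarshal: the struct fields, etcd
// keys, and endpoint below are illustrative assumptions, not part of the
// original code.
type exampleConfig struct {
	Debug    bool              `etcd:"/example/debug"`
	Hostname string            `etcd:"/example/hostname"`
	Labels   map[string]string `etcd:"/example/labels"`
}

func loadExampleConfig() (*exampleConfig, error) {
	client := etcd.NewClient([]string{"http://127.0.0.1:4001"})
	cfg := &exampleConfig{}
	if err := Demarshal(client, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}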
func LoadConfig(client *etcd.Client) (*Config, error) {
	n, err := client.Get("/skydns/config", false, false)
	config := &Config{ReadTimeout: 0, WriteTimeout: 0, Domain: "", DnsAddr: "", Nameservers: []string{""}, DNSSEC: ""}
	if err != nil {
		return config, nil
	}
	if err := json.Unmarshal([]byte(n.Node.Value), &config); err != nil {
		return nil, err
	}
	if err := setDefaults(config); err != nil {
		return nil, err
	}
	return config, nil
}
func checkStats(cli *etcd.Client, conf *config.FiddlerConf, pool spawner.SpawnPool) (err error) {
	// Pull aggregates of the information from config.
	serverResp, err := cli.Get("fiddler/servers")
	if err != nil {
		return err
	}
	serverCount := len(serverResp)

	grow, shrink := false, false // We'll track both.

	if serverCount < conf.Scale.Max {
		// We might want to grow. Let's see if we fit any of the parameters.
		for stat, val := range conf.Scale.Grow {
			if pass, err := check(cli, stat, val); err != nil {
				return err
			} else if pass {
				grow = true
			}
		}
	}

	if serverCount > conf.Scale.Min {
		// We might want to shrink. Let's see if we fit any of the parameters.
		for stat, val := range conf.Scale.Shrink {
			if pass, err := check(cli, stat, val); err != nil {
				return err
			} else if pass {
				shrink = true
			}
		}
	}

	if grow && shrink {
		log.Println("Fail as we want to grow and shrink.")
	} else if grow {
		log.Println("I want to grow")
		// TODO: We need to track the spawning of instances.
		// TODO: We need to come up with a heuristic of when to go. Every spike != growth.
		// pool.Grow()
	} else if shrink {
		log.Println("I want to shrink")
		// pool.Shrink()
	}
	return
}
func (etcd *ETCDClusterRunner) deleteDir(client *etcdclient.Client, dir string) {
	responses, err := client.Get(dir)
	Ω(err).ShouldNot(HaveOccured())
	for _, response := range responses {
		if response.Key != "/_etcd" {
			if response.Dir {
				etcd.deleteDir(client, response.Key)
			} else {
				_, err := client.Delete(response.Key)
				Ω(err).ShouldNot(HaveOccured())
			}
		}
	}
}
// get is a wrapper for client.Get that uses SingleInflight to suppress multiple
// outstanding queries.
func get(client *etcd.Client, path string, recursive bool) (*etcd.Response, error) {
	resp, err, _ := etcdInflight.Do(path, func() (*etcd.Response, error) {
		r, e := client.Get(path, false, recursive)
		if e != nil {
			return nil, e
		}
		return r, e
	})
	if err != nil {
		return resp, err
	}
	// shared?
	return resp, err
}
func loadConfig(client *etcd.Client, config *Config) (*Config, error) {
	n, err := client.Get("/bind/config", false, false)
	if err != nil {
		if err := setDefaults(config); err != nil {
			return nil, err
		}
		return config, nil
	}
	if err := json.Unmarshal([]byte(n.Node.Value), &config); err != nil {
		return nil, err
	}
	if err := setDefaults(config); err != nil {
		return nil, err
	}
	return config, nil
}
// etcdGetInitialWatchState turns an etcd Get request into a watch equivalent.
func etcdGetInitialWatchState(client *etcd.Client, key string, recursive bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) {
	resp, err := client.Get(key, false, recursive)
	if err != nil {
		if !etcdutil.IsEtcdNotFound(err) {
			glog.Errorf("watch was unable to retrieve the current index for the provided key (%q): %v", key, err)
			return resourceVersion, err
		}
		if etcdError, ok := err.(*etcd.EtcdError); ok {
			resourceVersion = etcdError.Index
		}
		return resourceVersion, nil
	}
	resourceVersion = resp.EtcdIndex
	convertRecursiveResponse(resp.Node, resp, incoming)
	return
}
func getStats(cli *etcd.Client, metric string) (values []float64, err error) {
	serverResp, err := cli.Get(path.Join("fiddler/stats", metric))
	if err != nil {
		return nil, err
	}
	values = make([]float64, len(serverResp))
	for i, resp := range serverResp {
		if values[i], err = strconv.ParseFloat(resp.Value, 64); err != nil {
			return nil, err
		}
	}
	log.Printf("Metric `%s` values: %v\n", metric, values)
	return
}
func (b *backends) Sync(client *etcd.Client) error {
	resp, err := client.Get(b.path, false, true)
	if err != nil {
		return err
	}
	for _, n := range resp.Node.Nodes {
		b.Update(&n, resp.Action)
	}
	// Start the subsequent watch from the index just after this sync.
	b.watchIndex = resp.EtcdIndex + 1
	return nil
}
func WatchMeta(c *etcd.Client, path string, stop chan bool, responseHandler func(*etcd.Response)) error {
	resp, err := c.Get(path, false, false)
	if err != nil {
		return err
	}
	// Got previous meta; we need to handle it.
	if resp.Node.Value != "" {
		go responseHandler(resp)
	}

	receiver := make(chan *etcd.Response, 1)
	go c.Watch(path, resp.EtcdIndex+1, false, receiver, stop)
	go func(receiver chan *etcd.Response) {
		for resp := range receiver {
			responseHandler(resp)
		}
	}(receiver)
	return nil
}
// getCommandFunc executes the "get" command.
func getCommandFunc(cmd *cobra.Command, args []string, client *etcd.Client) (*etcd.Response, error) {
	if len(args) == 0 {
		return nil, errors.New("Key required")
	}
	key := args[0]
	consistent := getConsistentFlag
	sorted := getSortFlag

	// Setup consistency on the client.
	if consistent {
		client.SetConsistency(etcd.STRONG_CONSISTENCY)
	} else {
		client.SetConsistency(etcd.WEAK_CONSISTENCY)
	}

	// Retrieve the value from the server.
	return client.Get(key, sorted, false)
}
func (locker *EtcdLocker) acquire(client *etcd.Client, key string, ttl uint64, wait bool) (Lock, error) {
	hasLock := false
	key = addPrefix(key)
	lock, err := addLockDirChild(client, key)
	if err != nil {
		return nil, errgo.Mask(err)
	}

	for !hasLock {
		res, err := client.Get(key, true, true)
		if err != nil {
			return nil, errgo.Mask(err)
		}

		if len(res.Node.Nodes) > 1 {
			sort.Sort(res.Node.Nodes)
			if res.Node.Nodes[0].CreatedIndex != lock.Node.CreatedIndex {
				if !wait {
					client.Delete(lock.Node.Key, false)
					return nil, &Error{res.Node.Nodes[0].Value}
				}
				err = locker.Wait(lock.Node.Key)
				if err != nil {
					return nil, errgo.Mask(err)
				}
			} else {
				// If the first index is the current one, it's our turn to lock the key.
				hasLock = true
			}
		} else {
			// If there is only one node, it's ours; the lock is acquired.
			hasLock = true
		}
	}

	// Once we hold the lock, set the TTL and return it.
	_, err = client.Update(lock.Node.Key, lock.Node.Value, ttl)
	if err != nil {
		return nil, errgo.Mask(err)
	}
	return &EtcdLock{client, lock.Node.Key, lock.Node.CreatedIndex}, nil
}
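// A hypothetical in-package usage sketch for acquire. The existence of a
// Release method on the returned Lock, the key, and the TTL are assumptions
// for illustration; only the acquire call above is from the original snippet.
func withLock(locker *EtcdLocker, client *etcd.Client, key string, ttl uint64, critical func() error) error {
	lock, err := locker.acquire(client, key, ttl, true)
	if err != nil {
		return err
	}
	defer lock.Release() // assumed method on the Lock interface
	return critical()
}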
func GetBackends(client *etcd.Client, service, backendName string) ([]Backend, error) {
	resp, err := client.Get(service, false, true)
	if err != nil {
		log.Println("Error when reading etcd: ", err)
		return nil, err
	}
	backends := make([]Backend, len(resp.Node.Nodes))
	for index, element := range resp.Node.Nodes {
		key := (*element).Key
		// Key format is: /service/IP:PORT
		service := strings.Split(key[strings.LastIndex(key, "/")+1:], ":")
		backends[index] = Backend{Name: fmt.Sprintf("back-%v", index), Ip: service[0], Port: service[1]}
	}
	return backends, nil
}
func Ls(client *etcd.Client, path string) ([]string, error) {
	result := make([]string, 0)
	response, err := client.Get(path, true, false)
	if err != nil {
		return nil, err
	}
	// A file: list its own value.
	if response.Node.Nodes == nil {
		return append(result, response.Node.Value), nil
	}
	// A directory: list the directory contents.
	for _, n := range response.Node.Nodes {
		result = append(result, n.Value)
	}
	return result, nil
}
// CanConnect checks a given SSH key against the list of authorized users in etcd
// to determine if a user with that key is allowed to connect. It takes an active
// etcd.Client struct pointer and the SSH key to test, and returns a username
// and a boolean representing whether they are allowed to connect.
func CanConnect(e *etcd.Client, sshkey string) (user string, allowed bool) {
	reply, err := e.Get("/flitter/builder/users/", true, true)
	if err != nil {
		log.Printf("etcd: %s", err)
		return "", false
	}

	keybit := strings.Split(sshkey, " ")[1]
	fp := GetFingerprint(keybit)

	for _, userdir := range reply.Node.Nodes {
		for _, fpnode := range userdir.Nodes {
			thisFpSplit := strings.Split(fpnode.Key, "/")
			thisFp := thisFpSplit[len(thisFpSplit)-1]
			if fp == thisFp {
				userpath := strings.Split(userdir.Key, "/")
				user := userpath[len(userpath)-1]
				return user, true
			}
		}
	}
	return
}
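// A hypothetical caller (an illustration, not from the original source) using
// CanConnect to gate an incoming SSH public key. The key string is expected in
// the usual "type base64-blob [comment]" form, which CanConnect splits on.
func authorizeKey(e *etcd.Client, authorizedKeyLine string) (string, error) {
	user, ok := CanConnect(e, authorizedKeyLine)
	if !ok {
		return "", errors.New("ssh key is not authorized")
	}
	log.Printf("accepted connection for user %s", user)
	return user, nil
}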