// attempts to unlock the targeted key - since lock keys are ephemeral, this // will delete it, but only if it is held by the current lock func (l Lock) Unlock(key string) error { kvp, meta, err := l.client.KV().Get(key, nil) if err != nil { return consulutil.NewKVError("get", key, err) } if kvp == nil { return nil } if kvp.Session != l.session { return AlreadyLockedError{Key: key} } success, _, err := l.client.KV().DeleteCAS(&api.KVPair{ Key: key, ModifyIndex: meta.LastIndex, }, nil) if err != nil { return consulutil.NewKVError("deletecas", key, err) } if !success { // the key has been mutated since we checked it - probably someone // overrode our lock on it or deleted it themselves return AlreadyLockedError{Key: key} } return nil }
func (c consulHealthChecker) WatchService( serviceID string, resultCh chan<- map[string]health.Result, errCh chan<- error, quitCh <-chan struct{}, ) { defer close(resultCh) var curIndex uint64 = 0 for { select { case <-quitCh: return case <-time.After(1 * time.Second): results, _, err := c.client.KV().List(kp.HealthPath(serviceID, ""), &api.QueryOptions{ WaitIndex: curIndex, }) if err != nil { errCh <- consulutil.NewKVError("list", kp.HealthPath(serviceID, ""), err) } else { out := make(map[string]health.Result) for _, result := range results { var next kp.WatchResult err = json.Unmarshal(result.Value, &next) if err != nil { errCh <- err } else { out[next.Node] = consulWatchToResult(next) } } resultCh <- out } } } }
func (s *consulStore) List() ([]fields.DaemonSet, error) { listed, _, err := s.kv.List(dsTree+"/", nil) if err != nil { return nil, consulutil.NewKVError("list", dsTree+"/", err) } return kvpsToDSs(listed) }
// these parts of Create may require a retry func (s *consulStore) innerCreate(manifest pods.Manifest, nodeSelector klabels.Selector, podLabels klabels.Set) (fields.RC, error) { id := fields.ID(uuid.New()) rcp := kp.RCPath(id.String()) rc := fields.RC{ ID: id, Manifest: manifest, NodeSelector: nodeSelector, PodLabels: podLabels, ReplicasDesired: 0, Disabled: false, } jsonRC, err := json.Marshal(rc) if err != nil { return fields.RC{}, err } success, _, err := s.kv.CAS(&api.KVPair{ Key: rcp, Value: jsonRC, // the chance of the UUID already existing is vanishingly small, but // technically not impossible, so we should use the CAS index to guard // against duplicate UUIDs ModifyIndex: 0, }, nil) if err != nil { return fields.RC{}, consulutil.NewKVError("cas", rcp, err) } if !success { return fields.RC{}, CASError(rcp) } return rc, nil }
// DeletePod deletes a pod manifest from the key-value store. No error will be // returned if the key didn't exist. func (c consulStore) DeletePod(key string) (time.Duration, error) { writeMeta, err := c.client.KV().Delete(key, nil) if err != nil { return 0, consulutil.NewKVError("delete", key, err) } return writeMeta.RequestTime, nil }
// DEPRECATED: use one of the Create* functions instead func (s consulStore) Put(u roll_fields.Update) error { b, err := json.Marshal(u) if err != nil { return err } key, err := RollPath(u.ID()) if err != nil { return err } success, _, err := s.kv.CAS(&api.KVPair{ Key: key, Value: b, // it must not already exist ModifyIndex: 0, }, nil) if err != nil { return consulutil.NewKVError("cas", key, err) } if !success { return fmt.Errorf("update with new RC ID %s already exists", u.NewRC) } return nil }
func (c *consulStore) ReadPod(podKey types.PodUniqueKey) (Pod, error) { if podKey == "" { return Pod{}, util.Errorf("Pod store can only read pods with uuid keys") } if pod, ok := c.fetchFromCache(podKey); ok { return pod, nil } podPath := computePodPath(podKey) pair, _, err := c.consulKV.Get(podPath, nil) if err != nil { return Pod{}, consulutil.NewKVError("get", podPath, err) } if pair == nil { return Pod{}, NoPodError(podKey) } var pod Pod err = json.Unmarshal(pair.Value, &pod) if err != nil { return Pod{}, util.Errorf("Could not unmarshal pod '%s' as json: %s", podKey, err) } c.addToCache(podKey, pod) return pod, nil }
func (s *consulStore) GetAllStatusForResourceType(t ResourceType) (map[ResourceID]map[Namespace]Status, error) { prefix, err := resourceTypePath(t) if err != nil { return nil, err } ret := make(map[ResourceID]map[Namespace]Status) pairs, _, err := s.kv.List(prefix, nil) if err != nil { return nil, consulutil.NewKVError("list", prefix, err) } for _, pair := range pairs { key := pair.Key status := pair.Value _, id, namespace, err := keyParts(key) if err != nil { return nil, err } _, ok := ret[id] if !ok { ret[id] = make(map[Namespace]Status) } ret[id][namespace] = status } return ret, nil }
// WriteRealityIndex writes a key to the /reality tree to signify that the pod
// specified by the UUID has been launched on the given node.
func (c *consulStore) WriteRealityIndex(podKey types.PodUniqueKey, node types.NodeName) error {
	if podKey == "" {
		return util.Errorf("Pod store can only write index for pods with uuid keys")
	}

	realityIndexPath := computeRealityIndexPath(podKey, node)

	// Write the per-node reality index entry pointing back at the pod key.
	// (NOTE(review): a previous comment here said /intent/<node>/<key>, but
	// the path computed above is a reality index path.)
	index := PodIndex{
		PodKey: podKey,
	}
	indexBytes, err := json.Marshal(index)
	if err != nil {
		return util.Errorf("Could not marshal index as json: %s", err)
	}

	indexPair := &api.KVPair{
		Key:   realityIndexPath,
		Value: indexBytes,
	}
	_, err = c.consulKV.Put(indexPair, nil)
	if err != nil {
		return consulutil.NewKVError("put", realityIndexPath, err)
	}
	return nil
}
func (s *consulStore) List() ([]fields.RC, error) { listed, _, err := s.kv.List(rcTree+"/", nil) if err != nil { return nil, consulutil.NewKVError("list", rcTree+"/", err) } return kvpsToRCs(listed) }
func (c consulStore) PutHealth(res WatchResult) (time.Time, time.Duration, error) { key := HealthPath(res.Service, res.Node) now := time.Now() res.Time = now res.Expires = now.Add(TTL) data, err := json.Marshal(res) if err != nil { return time.Time{}, 0, err } keyPair := &api.KVPair{ Key: key, Value: data, } writeMeta, err := c.client.KV().Put(keyPair, nil) var retDur time.Duration if writeMeta != nil { retDur = writeMeta.RequestTime } if err != nil { return now, retDur, consulutil.NewKVError("put", key, err) } return now, retDur, nil }
// SetPod writes a pod manifest into the consul key-value store. func (c consulStore) SetPod(podPrefix PodPrefix, nodename types.NodeName, manifest manifest.Manifest) (time.Duration, error) { buf := bytes.Buffer{} err := manifest.Write(&buf) if err != nil { return 0, err } key, err := podPath(podPrefix, nodename, manifest.ID()) if err != nil { return 0, err } keyPair := &api.KVPair{ Key: key, Value: buf.Bytes(), } writeMeta, err := c.client.KV().Put(keyPair, nil) var retDur time.Duration if writeMeta != nil { retDur = writeMeta.RequestTime } if err != nil { return retDur, consulutil.NewKVError("put", key, err) } return retDur, nil }
func (s *consulStore) Get(id fields.ID) (fields.DaemonSet, *api.QueryMeta, error) { var metadata *api.QueryMeta dsPath, err := s.dsPath(id) if err != nil { return fields.DaemonSet{}, metadata, util.Errorf("Error getting daemon set path: %v", err) } kvp, metadata, err := s.kv.Get(dsPath, nil) if err != nil { return fields.DaemonSet{}, metadata, consulutil.NewKVError("get", dsPath, err) } if metadata == nil { // no metadata returned return fields.DaemonSet{}, metadata, errors.New("No metadata found") } if kvp == nil { // ID didn't exist return fields.DaemonSet{}, metadata, NoDaemonSet } ds, err := kvpToDS(kvp) if err != nil { return fields.DaemonSet{}, metadata, util.Errorf("Error translating kvp to daemon set: %v", err) } return ds, metadata, nil }
func (s consulStore) Delete(id rcf.ID) error { key := kp.RollPath(id.String()) _, err := s.kv.Delete(key, nil) if err != nil { return consulutil.NewKVError("delete", key, err) } return nil }
// Attempts to create a rolling update. Checks sessionErrCh for session renewal // errors just before actually doing the creation to minimize the likelihood of // race conditions resulting in conflicting RUs func (s consulStore) attemptRUCreation(u roll_fields.Update, rollLabels klabels.Set, sessionErrCh chan error) (createdRU roll_fields.Update, err error) { // If we create an RU, we also want to create its labels. If the second step // fails, we want to best-effort remove the RU var ruCleanup func() defer func() { if err != nil && ruCleanup != nil { ruCleanup() } }() b, err := json.Marshal(u) if err != nil { return u, err } key, err := RollPath(roll_fields.ID(u.NewRC)) if err != nil { return u, err } // Confirm that our lock session is still valid, and then create the // rolling update. If session isn't valid, delete the newRC we just // created select { case err := <-sessionErrCh: if err == nil { err = util.Errorf("Cannot create ru because session was destroyed") } return u, err default: success, _, err := s.kv.CAS(&api.KVPair{ Key: key, Value: b, }, nil) if err != nil { return u, consulutil.NewKVError("cas", key, err) } // Shouldn't be possible if our session is still valid, preventing other insertions if !success { return u, util.Errorf("update with new RC ID %s already exists", u.NewRC) } ruCleanup = func() { err := s.Delete(u.ID()) if err != nil { s.logger.WithError(err).Errorln("Unable to cleanup RU %s after failed labeling attempt", u.ID()) } } } err = s.labeler.SetLabels(labels.RU, u.ID().String(), rollLabels) if err != nil { return roll_fields.Update{}, err } return u, nil }
func (c *consulStore) DeleteRealityIndex(podKey types.PodUniqueKey, node types.NodeName) error { realityIndexPath := computeRealityIndexPath(podKey, node) _, err := c.consulKV.Delete(realityIndexPath, nil) if err != nil { return consulutil.NewKVError("delete", realityIndexPath, err) } return nil }
func (c consulStore) GetHealth(service string, node types.NodeName) (WatchResult, error) { healthRes := &WatchResult{} key := HealthPath(service, node) res, _, err := c.client.KV().Get(key, nil) if err != nil { return WatchResult{}, consulutil.NewKVError("get", key, err) } else if res == nil { return WatchResult{}, nil } err = json.Unmarshal(res.Value, healthRes) if err != nil { return WatchResult{}, consulutil.NewKVError("get", key, err) } if healthRes.IsStale() { return *healthRes, consulutil.NewKVError("get", key, fmt.Errorf("stale health entry")) } return *healthRes, nil }
// Ping confirms that Consul can be reached and it has a leader. If the return // is nil, then consul should be ready to accept requests. // // If the return is non-nil, this typically indicates that either Consul is // unreachable (eg the agent is not listening on the target port) or has not // found a leader (in which case Consul returns a 500 to all endpoints, except // the status types). // // If a cluster is starting for the first time, it may report a leader just // before beginning raft replication, thus rejecting requests made at that // exact moment. func Ping(client *api.Client) error { _, qm, err := client.Catalog().Nodes(&api.QueryOptions{RequireConsistent: true}) if err != nil { return consulutil.NewKVError("ping", "/catalog/nodes", err) } if qm == nil || !qm.KnownLeader { return util.Errorf("No known leader") } return nil }
// Pod reads a pod manifest from the key-value store. If the given key does not // exist, a nil *PodManifest will be returned, along with a pods.NoCurrentManifest // error. func (c consulStore) Pod(key string) (pods.Manifest, time.Duration, error) { kvPair, writeMeta, err := c.client.KV().Get(key, nil) if err != nil { return nil, 0, consulutil.NewKVError("get", key, err) } if kvPair == nil { return nil, writeMeta.RequestTime, pods.NoCurrentManifest } manifest, err := pods.ManifestFromBytes(kvPair.Value) return manifest, writeMeta.RequestTime, err }
func (c consulStore) GetServiceHealth(service string) (map[string]WatchResult, error) { healthRes := make(map[string]WatchResult) key := HealthPath(service, "") res, _, err := c.client.KV().List(key, nil) if err != nil { return healthRes, consulutil.NewKVError("list", key, err) } else if res == nil { return healthRes, nil } for _, kvp := range res { watch := &WatchResult{} err = json.Unmarshal(kvp.Value, watch) if err != nil { return healthRes, consulutil.NewKVError("get", key, err) } // maps key to result (eg /health/hello/nodename) healthRes[kvp.Key] = *watch } return healthRes, nil }
func (s consulStore) Lock(id rcf.ID, session string) (bool, error) { key := kp.LockPath(kp.RollPath(id.String())) success, _, err := s.kv.Acquire(&api.KVPair{ Key: key, Value: []byte(session), Session: session, }, nil) if err != nil { return false, consulutil.NewKVError("acquire", key, err) } return success, nil }
// DeletePod deletes a pod manifest from the key-value store. No error will be // returned if the key didn't exist. func (c consulStore) DeletePod(podPrefix PodPrefix, nodename types.NodeName, podId types.PodID) (time.Duration, error) { key, err := podPath(podPrefix, nodename, podId) if err != nil { return 0, err } writeMeta, err := c.client.KV().Delete(key, nil) if err != nil { return 0, consulutil.NewKVError("delete", key, err) } return writeMeta.RequestTime, nil }
func (s *consulStore) Delete(id fields.ID) error { dsPath, err := s.dsPath(id) if err != nil { return util.Errorf("Error getting daemon set path: %v", err) } _, err = s.kv.Delete(dsPath, nil) if err != nil { return consulutil.NewKVError("delete", dsPath, err) } return nil }
// Unschedule removes a uuid-keyed pod from the store, deleting both the main
// pod key and its per-node secondary index. Only uuid pod keys are supported.
func (c *consulStore) Unschedule(podKey types.PodUniqueKey) error {
	if podKey == "" {
		return util.Errorf("Pod store can only delete pods with uuid keys")
	}

	// Read the pod so we know which node the secondary index will have
	pod, err := c.ReadPod(podKey)
	if err != nil {
		return err
	}

	podPath := computePodPath(podKey)
	intentIndexPath := computeIntentIndexPath(podKey, pod.Node)

	// Due to lack of transactionality in deleting both keys, the below code has the
	// following steps:
	// 1) attempt to delete the main pod key. If it fails, return an error without
	// deleting the index. The caller can retry the deletion if it matters
	// 2) attempt to delete the index. If this fails, return a special error type
	// so errors know they shouldn't retry the deletion. A "sweeper" process is
	// planned to remove hanging indices which will clean up the problem eventually.
	// TODO: use a transaction when we can rely upon consul 0.7
	_, err = c.consulKV.Delete(podPath, nil)
	if err != nil {
		return consulutil.NewKVError("delete", podPath, err)
	}
	// invalidate the cache only after the authoritative key is gone
	c.deleteFromCache(podKey)

	_, err = c.consulKV.Delete(intentIndexPath, nil)
	if err != nil {
		// special error type: signals callers that the pod key itself was
		// deleted and the deletion must not be retried
		return IndexDeletionFailure{
			path: intentIndexPath,
			err:  consulutil.NewKVError("delete", intentIndexPath, err),
		}
	}
	return nil
}
func (s *consulStore) DeleteStatus(t ResourceType, id ResourceID, namespace Namespace) error { key, err := namespacedResourcePath(t, id, namespace) if err != nil { return err } _, err = s.kv.Delete(key, nil) if err != nil { return consulutil.NewKVError("delete", key, err) } return nil }
func (s *consulStore) Delete(id fields.ID) error { key, err := pcPath(id) if err != nil { return err } _, err = s.kv.Delete(key, nil) if err != nil { return consulutil.NewKVError("delete", key, err) } return s.applicator.RemoveAllLabels(labels.PC, id.String()) }
func (s consulStore) Get(id rcf.ID) (rollf.Update, error) { key := kp.RollPath(id.String()) kvp, _, err := s.kv.Get(key, nil) if err != nil { return rollf.Update{}, consulutil.NewKVError("get", key, err) } var ret rollf.Update err = json.Unmarshal(kvp.Value, &ret) if err != nil { return rollf.Update{}, err } return ret, nil }
// determine the name and ID of the session that is locking this key, if any func (c consulStore) LockHolder(key string) (string, string, error) { kvp, _, err := c.client.KV().Get(key, nil) if err != nil { return "", "", consulutil.NewKVError("get", key, err) } if kvp == nil || kvp.Session == "" { return "", "", nil } se, _, err := c.client.Session().Info(kvp.Session, nil) if err != nil { return "", "", util.Errorf("Could not get lock information for %q held by id %q", key, kvp.Session) } return se.Name, se.ID, nil }
// attempts to acquire the lock on the targeted key // keys used for locking/synchronization should be ephemeral (ie their value // does not matter and you don't care if they're deleted) func (l Lock) Lock(key string) error { success, _, err := l.client.KV().Acquire(&api.KVPair{ Key: key, Value: []byte(l.name), Session: l.session, }, nil) if err != nil { return consulutil.NewKVError("acquire lock", key, err) } if success { return nil } return AlreadyLockedError{Key: key} }
// Pod reads a pod manifest from the key-value store. If the given key does not // exist, a nil *PodManifest will be returned, along with a pods.NoCurrentManifest // error. func (c consulStore) Pod(podPrefix PodPrefix, nodename types.NodeName, podId types.PodID) (manifest.Manifest, time.Duration, error) { key, err := podPath(podPrefix, nodename, podId) if err != nil { return nil, 0, err } kvPair, writeMeta, err := c.client.KV().Get(key, nil) if err != nil { return nil, 0, consulutil.NewKVError("get", key, err) } if kvPair == nil { return nil, writeMeta.RequestTime, pods.NoCurrentManifest } manifest, err := manifest.FromBytes(kvPair.Value) return manifest, writeMeta.RequestTime, err }