func (u *update) shouldRollAfterDelay(podID types.PodID) (int, int, error) {
	// Check health again following the roll delay. If things have gotten
	// worse since we last looked, or there is an error, we break this iteration.
	checks, err := u.hcheck.Service(podID.String())
	if err != nil {
		return 0, 0, util.Errorf("Could not retrieve health following delay: %v", err)
	}

	afterDelayNew, err := u.countHealthy(u.NewRC, checks)
	if err != nil {
		return 0, 0, util.Errorf("Could not determine new service health: %v", err)
	}

	afterDelayOld, err := u.countHealthy(u.OldRC, checks)
	if err != nil {
		return 0, 0, util.Errorf("Could not determine old service health: %v", err)
	}

	afterDelayRemove, afterDelayAdd := rollAlgorithm(u.rollAlgorithmParams(afterDelayOld, afterDelayNew))
	if afterDelayRemove <= 0 && afterDelayAdd <= 0 {
		return 0, 0, util.Errorf("No nodes can be safely updated after %v roll delay, will wait again", u.RollDelay)
	}

	return afterDelayRemove, afterDelayAdd, nil
}
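// Illustrative usage sketch (an assumption, not part of the original source): how a
// caller in the roll loop might consume shouldRollAfterDelay's result. Only the call
// itself comes from the code above; the surrounding function is made up.
func rollAfterDelayExample(u *update, podID types.PodID) {
	remove, add, err := u.shouldRollAfterDelay(podID)
	if err != nil {
		// Health regressed during the delay (or could not be read); the roll loop
		// would wait for another RollDelay and check again.
		return
	}

	// Otherwise remove `remove` nodes from the old RC and add `add` nodes to the
	// new RC, as computed by rollAlgorithm.
	_ = remove
	_ = add
}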
func (s *fakeKpStore) Pod(podPrefix kp.PodPrefix, nodeName types.NodeName, podID types.PodID) (
	manifest.Manifest, time.Duration, error) {
	key := path.Join(string(podPrefix), nodeName.String(), podID.String())
	if manifest, ok := s.manifests[key]; ok {
		return manifest, 0, nil
	}
	return nil, 0, pods.NoCurrentManifest
}
// NewLegacyP2RM is a constructor for the P2RM type which configures it to
// remove a "legacy" pod. It will generate the storage types based on its
// consulutil.ConsulClient argument.
func NewLegacyP2RM(client consulutil.ConsulClient, podName types.PodID, nodeName types.NodeName, labeler labels.ApplicatorWithoutWatches) *P2RM {
	rm := &P2RM{}
	rm.LabelID = path.Join(nodeName.String(), podName.String())
	rm.PodID = podName
	rm.NodeName = nodeName
	rm.PodUniqueKey = ""
	rm.configureStorage(client, labeler)
	return rm
}
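// Illustrative sketch (an assumption, not part of the original source): constructing a
// remover for a legacy pod. The client and labeler are assumed to be built elsewhere,
// and the pod ID and node name literals are made up.
func legacyRemoverExample(client consulutil.ConsulClient, labeler labels.ApplicatorWithoutWatches) *P2RM {
	return NewLegacyP2RM(client, "web", "node1.example.com", labeler)
}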
func computeUniqueName(id types.PodID, uniqueKey types.PodUniqueKey) string {
	name := id.String()
	if uniqueKey != "" {
		// If the pod was scheduled with a UUID, we want to namespace its pod home
		// with the same uuid. This enables multiple pods with the same pod ID to
		// exist on the same filesystem.
		name = fmt.Sprintf("%s-%s", name, uniqueKey)
	}

	return name
}
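// Illustrative sketch (an assumption, not part of the original source): the pod-home
// directory names computeUniqueName produces. The pod ID and unique key literals are
// made up, and the example assumes types.PodID and types.PodUniqueKey are string-based
// types.
func uniqueNameExamples() {
	legacy := computeUniqueName("mysql", "")        // "mysql"
	uuidPod := computeUniqueName("mysql", "abc123") // "mysql-abc123"
	fmt.Println(legacy, uuidPod)
}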
func (s *consulStore) FindWhereLabeled(podID types.PodID, availabilityZone fields.AvailabilityZone, clusterName fields.ClusterName) ([]fields.PodCluster, error) {
	sel := klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{availabilityZone.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{clusterName.String()})

	podClusters, err := s.applicator.GetMatches(sel, labels.PC)
	if err != nil {
		return nil, err
	}

	ret := make([]fields.PodCluster, len(podClusters))
	for i, pc := range podClusters {
		ret[i], err = s.Get(fields.ID(pc.ID))
		if err != nil {
			return nil, err
		}
	}

	return ret, nil
}
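// Illustrative sketch (an assumption, not part of the original source): fetching the pod
// clusters labeled with a particular pod ID / availability zone / cluster name triple.
// The store value and the literal arguments are made up for the example.
func findLabeledExample(store *consulStore) error {
	pcs, err := store.FindWhereLabeled("web", "us-west", "production")
	if err != nil {
		return err
	}
	for _, pc := range pcs {
		fmt.Println(pc.ID)
	}
	return nil
}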
func pcCreateLockPath(podID types.PodID, availabilityZone fields.AvailabilityZone, clusterName fields.ClusterName) string {
	return path.Join(consulutil.LOCK_TREE, podID.String(), availabilityZone.String(), clusterName.String())
}
// Returns the consul path to use when locking out any other pkg/replication-based deploys
// for a given pod ID
func ReplicationLockPath(podId types.PodID) string {
	return path.Join(consulutil.LOCK_TREE, "replication", podId.String())
}
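// Illustrative sketch (an assumption, not part of the original source): the lock keys the
// two helpers above produce, assuming consulutil.LOCK_TREE is "lock" and using made-up
// pod/zone/cluster values. In the real tree these helpers likely live in separate
// packages; they are shown together here purely for illustration.
func lockPathExamples() {
	fmt.Println(pcCreateLockPath("web", "us-west", "production")) // "lock/web/us-west/production"
	fmt.Println(ReplicationLockPath("web"))                       // "lock/replication/web"
}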
func selectorFrom(az fields.AvailabilityZone, cn fields.ClusterName, podID types.PodID) klabels.Selector {
	return klabels.Everything().
		Add(fields.PodIDLabel, klabels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, klabels.EqualsOperator, []string{cn.String()})
}
func (s *fakeKpStore) DeletePod(podPrefix kp.PodPrefix, nodeName types.NodeName, podID types.PodID) (time.Duration, error) {
	key := path.Join(string(podPrefix), nodeName.String(), podID.String())
	delete(s.manifests, key)
	return 0, nil
}
func (f *hookFactory) NewHookPod(id types.PodID) *Pod {
	home := filepath.Join(f.hookRoot, id.String())

	// Hook pods have no unique key, so pass an empty PodUniqueKey, matching the
	// newPodWithHome signature used by NewLegacyPod below.
	return newPodWithHome(id, "", home, f.node)
}
// these utility functions are used primarily while we exist in a mutable
// deployment world. We will need to figure out how to replace these with
// different datasources to allow RCs and DSs to continue to function correctly
// in the future.
func MakePodLabelKey(node types.NodeName, podID types.PodID) string {
	return node.String() + "/" + podID.String()
}
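// Illustrative sketch (an assumption, not part of the original source): the label key
// produced for a node/pod pair; the literal node name and pod ID are made up.
func podLabelKeyExample() {
	key := MakePodLabelKey("node1.example.com", "web")
	fmt.Println(key) // "node1.example.com/web"
}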
func (f *factory) NewLegacyPod(id types.PodID) *Pod {
	home := filepath.Join(f.podRoot, id.String())
	return newPodWithHome(id, "", home, f.node)
}