// updateRcWithRetries retries updating the given rc on conflict with the following steps:
// 1. Get latest resource
// 2. applyUpdate
// 3. Update the resource
func updateRcWithRetries(rcClient coreclient.ReplicationControllersGetter, namespace string, rc *api.ReplicationController, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
	// Deep copy the rc in case we fail on Get during the retry loop.
	obj, err := api.Scheme.Copy(rc)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy rc before updating it: %v", err)
	}
	oldRc := obj.(*api.ReplicationController)
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() (e error) {
		// Apply the update, then attempt to push it to the apiserver.
		applyUpdate(rc)
		if rc, e = rcClient.ReplicationControllers(namespace).Update(rc); e == nil {
			// rc contains the latest controller post update
			return
		}
		updateErr := e
		// Refresh the controller to pick up the latest resource version. Since the update
		// failed, we can't trust rc, so look it up by oldRc.Name.
		if rc, e = rcClient.ReplicationControllers(namespace).Get(oldRc.Name); e != nil {
			// The Get failed: the value in rc cannot be trusted.
			rc = oldRc
		}
		// Only return the error from the update.
		return updateErr
	})
	// If the error is non-nil the returned controller cannot be trusted; if it is nil, the
	// returned controller contains the applied update.
	return rc, err
}
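// A minimal usage sketch (not part of the original file): bump the replica count through
// the conflict-retrying helper above. The bumpReplicas name is hypothetical. Because
// applyUpdate is re-applied to the freshest copy on every conflict, the mutation must be
// safe to run more than once.
func bumpReplicas(rcClient coreclient.ReplicationControllersGetter, rc *api.ReplicationController) (*api.ReplicationController, error) {
	return updateRcWithRetries(rcClient, rc.Namespace, rc, func(cur *api.ReplicationController) {
		// Idempotent relative to the copy it receives: each retry starts from the
		// latest server-side state, so this never double-increments.
		cur.Spec.Replicas++
	})
}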
// WaitForRunningDeployment waits until the specified deployment is no longer New or Pending. Returns true if
// the deployment became running, complete, or failed within timeout, false if it did not, and an error if any
// other error state occurred. The last observed deployment state is returned.
func WaitForRunningDeployment(rn kcoreclient.ReplicationControllersGetter, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
	options := kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: observed.ResourceVersion}
	w, err := rn.ReplicationControllers(observed.Namespace).Watch(options)
	if err != nil {
		return observed, false, err
	}
	defer w.Stop()

	if _, err := watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for replication controller: %v", e.Object)
		}
		obj, isController := e.Object.(*kapi.ReplicationController)
		if !isController {
			// Print e.Object rather than obj: after a failed type assertion, obj is nil.
			return false, fmt.Errorf("received unknown object while watching for deployments: %v", e.Object)
		}
		observed = obj

		switch deployutil.DeploymentStatusFor(observed) {
		case api.DeploymentStatusRunning, api.DeploymentStatusFailed, api.DeploymentStatusComplete:
			return true, nil
		case api.DeploymentStatusNew, api.DeploymentStatusPending:
			return false, nil
		default:
			return false, ErrUnknownDeploymentPhase
		}
	}); err != nil {
		return observed, false, err
	}

	return observed, true, nil
}
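// A minimal caller sketch, assuming the surrounding packages; the deployerWait name and
// the two-minute timeout are illustrative only.
func deployerWait(rn kcoreclient.ReplicationControllersGetter, rc *kapi.ReplicationController) error {
	observed, running, err := WaitForRunningDeployment(rn, rc, 2*time.Minute)
	if err != nil {
		return err
	}
	if !running {
		// Timed out while the deployment was still New or Pending.
		return fmt.Errorf("deployment %s/%s is still New or Pending after the timeout", observed.Namespace, observed.Name)
	}
	return nil
}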
// LoadExistingNextReplicationController looks up the replication controller that a previous
// rolling update may have created. A NotFound error is not treated as a failure: both return
// values are nil when the controller does not exist.
func LoadExistingNextReplicationController(c coreclient.ReplicationControllersGetter, namespace, newName string) (*api.ReplicationController, error) {
	if len(newName) == 0 {
		return nil, nil
	}
	newRc, err := c.ReplicationControllers(namespace).Get(newName)
	if err != nil && errors.IsNotFound(err) {
		return nil, nil
	}
	return newRc, err
}
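// A caller-side sketch of the contract above (illustrative; the nextControllerExists name
// is hypothetical): a nil controller with a nil error means "no next controller yet",
// which is distinct from a real lookup failure.
func nextControllerExists(c coreclient.ReplicationControllersGetter, namespace, newName string) (bool, error) {
	rc, err := LoadExistingNextReplicationController(c, namespace, newName)
	if err != nil {
		return false, err
	}
	return rc != nil, nil
}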
// CreateNewControllerFromCurrentController fetches the controller named cfg.OldName and
// builds the "next" controller from it: the target container's image (and optionally its
// pull policy) is replaced, the result is hashed, and the hash is applied to the name,
// selector, and pod template labels so old and new pods stay distinguishable.
func CreateNewControllerFromCurrentController(rcClient coreclient.ReplicationControllersGetter, codec runtime.Codec, cfg *NewControllerConfig) (*api.ReplicationController, error) {
	containerIndex := 0
	// load the old RC into the "new" RC
	newRc, err := rcClient.ReplicationControllers(cfg.Namespace).Get(cfg.OldName)
	if err != nil {
		return nil, err
	}

	if len(cfg.Container) != 0 {
		containerFound := false
		for i, c := range newRc.Spec.Template.Spec.Containers {
			if c.Name == cfg.Container {
				containerIndex = i
				containerFound = true
				break
			}
		}
		if !containerFound {
			return nil, fmt.Errorf("container %s not found in pod", cfg.Container)
		}
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 && len(cfg.Container) == 0 {
		return nil, goerrors.New("must specify container to update when updating a multi-container pod")
	}

	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("pod has no containers (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[containerIndex].Image = cfg.Image
	if len(cfg.PullPolicy) != 0 {
		newRc.Spec.Template.Spec.Containers[containerIndex].ImagePullPolicy = cfg.PullPolicy
	}

	newHash, err := api.HashObject(newRc, codec)
	if err != nil {
		return nil, err
	}

	if len(cfg.NewName) == 0 {
		cfg.NewName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = cfg.NewName

	newRc.Spec.Selector[cfg.DeploymentKey] = newHash
	newRc.Spec.Template.Labels[cfg.DeploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
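// An illustrative sketch of building the "next" controller for a single-container rc; the
// newControllerForImage helper and its arguments are hypothetical. NewName is left empty so
// CreateNewControllerFromCurrentController generates "<oldName>-<hash>".
func newControllerForImage(rcClient coreclient.ReplicationControllersGetter, codec runtime.Codec, namespace, oldName, image, deploymentKey string) (*api.ReplicationController, error) {
	cfg := &NewControllerConfig{
		Namespace:     namespace,
		OldName:       oldName,
		Image:         image,
		DeploymentKey: deploymentKey,
		// Container is omitted: with a single-container pod template, index 0 is used.
	}
	return CreateNewControllerFromCurrentController(rcClient, codec, cfg)
}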
// FindSourceController scans the namespace for a replication controller whose source id
// annotation names the given controller, i.e. an in-progress update that was started
// from namespace/name.
func FindSourceController(r coreclient.ReplicationControllersGetter, namespace, name string) (*api.ReplicationController, error) {
	list, err := r.ReplicationControllers(namespace).List(api.ListOptions{})
	if err != nil {
		return nil, err
	}
	for ix := range list.Items {
		rc := &list.Items[ix]
		if rc.Annotations != nil && strings.HasPrefix(rc.Annotations[sourceIdAnnotation], name) {
			return rc, nil
		}
	}
	return nil, fmt.Errorf("couldn't find a replication controller with source id == %s/%s", namespace, name)
}
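// A sketch of the annotation convention the prefix match above relies on (an assumption
// here: upstream kubectl writes the value as "<name>:<resourceVersion>", which is why
// matching on the bare name is sufficient). The markSource helper is hypothetical.
func markSource(newRc, oldRc *api.ReplicationController) {
	if newRc.Annotations == nil {
		newRc.Annotations = map[string]string{}
	}
	// Record which controller this update was started from.
	newRc.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", oldRc.Name, oldRc.ResourceVersion)
}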
// ControllerHasDesiredReplicas returns a condition that will be true if and only if
// the desired replica count for a controller's ReplicaSelector equals the Replicas count.
func ControllerHasDesiredReplicas(rcClient coreclient.ReplicationControllersGetter, controller *api.ReplicationController) wait.ConditionFunc {
	// If we're given a controller where the status lags the spec, it either means that the controller is stale,
	// or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
	desiredGeneration := controller.Generation

	return func() (bool, error) {
		ctrl, err := rcClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
		if err != nil {
			return false, err
		}
		// There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass,
		// or, after this check has passed, a modification causes the rc manager to create more pods.
		// This will not be an issue once we've implemented graceful delete for rcs, but till then
		// concurrent stop operations on the same rc might have unintended side effects.
		return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == ctrl.Spec.Replicas, nil
	}
}
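// A minimal polling sketch: block until the rc's status catches up with its spec. The
// waitForDesiredReplicas name, interval, and timeout are illustrative.
func waitForDesiredReplicas(rcClient coreclient.ReplicationControllersGetter, controller *api.ReplicationController) error {
	return wait.Poll(time.Second, 5*time.Minute, ControllerHasDesiredReplicas(rcClient, controller))
}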
// Rename deletes the controller under its old name (orphaning its pods so they keep
// running), waits for the deletion to be observed, and then recreates the controller
// under newName. Note that rc is mutated: its Name is set to newName and its
// ResourceVersion is cleared.
func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""
	// First delete the oldName RC and orphan its pods.
	trueVar := true
	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar})
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName)
		if err == nil {
			return false, nil
		} else if errors.IsNotFound(err) {
			return true, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return err
	}
	// Then create the same RC with the new name.
	_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
	return err
}
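// An illustrative sketch of promoting the "next" controller at the end of a rolling
// update; the promote helper is hypothetical. Because Rename orphans the pods before
// recreating the controller, the pods keep serving while only the controller object is
// swapped, and the recreated rc re-adopts them through its unchanged selector.
func promote(c coreclient.ReplicationControllersGetter, next *api.ReplicationController, finalName string) error {
	if next.Name == finalName {
		// Nothing to do; avoid a needless delete/recreate cycle.
		return nil
	}
	return Rename(c, next, finalName)
}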