// cancel cancels any deployment process in progress for config. func (o DeployOptions) cancel(config *deployapi.DeploymentConfig) error { if config.Spec.Paused { return fmt.Errorf("cannot cancel a paused deployment config") } deployments, err := o.kubeClient.ReplicationControllers(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(config.Name)}) if err != nil { return err } if len(deployments.Items) == 0 { fmt.Fprintf(o.out, "There have been no deployments for %s/%s\n", config.Namespace, config.Name) return nil } sort.Sort(deployutil.ByLatestVersionDesc(deployments.Items)) failedCancellations := []string{} anyCancelled := false for _, deployment := range deployments.Items { status := deployutil.DeploymentStatusFor(&deployment) switch status { case deployapi.DeploymentStatusNew, deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning: if deployutil.IsDeploymentCancelled(&deployment) { continue } deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser _, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(&deployment) if err == nil { fmt.Fprintf(o.out, "Cancelled deployment #%d\n", config.Status.LatestVersion) anyCancelled = true } else { fmt.Fprintf(o.out, "Couldn't cancel deployment #%d (status: %s): %v\n", deployutil.DeploymentVersionFor(&deployment), status, err) failedCancellations = append(failedCancellations, strconv.FormatInt(deployutil.DeploymentVersionFor(&deployment), 10)) } } } if len(failedCancellations) > 0 { return fmt.Errorf("couldn't cancel deployment %s", strings.Join(failedCancellations, ", ")) } if !anyCancelled { latest := &deployments.Items[0] maybeCancelling := "" if deployutil.IsDeploymentCancelled(latest) && !deployutil.IsTerminatedDeployment(latest) { maybeCancelling = " (cancelling)" } timeAt := 
strings.ToLower(units.HumanDuration(time.Now().Sub(latest.CreationTimestamp.Time))) fmt.Fprintf(o.out, "No deployments are in progress (latest deployment #%d %s%s %s ago)\n", deployutil.DeploymentVersionFor(latest), strings.ToLower(string(deployutil.DeploymentStatusFor(latest))), maybeCancelling, timeAt) } return nil }
func deploymentStatuses(rcs []kapi.ReplicationController) []string { statuses := []string{} for _, rc := range rcs { statuses = append(statuses, string(deployutil.DeploymentStatusFor(&rc))) } return statuses }
// TestHandle_runningPod ensures that a running deployer pod results in a
// transition of the deployment's status to running.
func TestHandle_runningPod(t *testing.T) {
	// Start from a deployment stuck in the Pending phase.
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusPending)
	var updatedDeployment *kapi.ReplicationController
	kFake := &ktestclient.Fake{}
	// Serve the deployment for GETs; capture it on update so the resulting
	// phase can be asserted below.
	kFake.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		return true, deployment, nil
	})
	kFake.PrependReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		updatedDeployment = deployment
		return true, deployment, nil
	})
	controller := &DeployerPodController{
		store:   cache.NewStore(cache.MetaNamespaceKeyFunc),
		kClient: kFake,
	}
	// Feed the controller a running deployer pod for that deployment.
	err := controller.Handle(runningPod(deployment))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}
	if e, a := deployapi.DeploymentStatusRunning, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
}
// TestHandle_runningPod ensures that a running deployer pod results in a
// transition of the deployment's status to running.
// NOTE(review): this duplicates the name of another TestHandle_runningPod in
// this file — presumably the two come from different revisions; confirm
// which one belongs here.
func TestHandle_runningPod(t *testing.T) {
	// Start from a deployment in the Pending phase.
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusPending)
	var updatedDeployment *kapi.ReplicationController
	controller := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			// Serve the deployment on get; capture it on update so the new
			// phase can be asserted below.
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return deployment, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				return deployment, nil
			},
		},
	}
	err := controller.Handle(runningPod(deployment))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}
	if e, a := deployapi.DeploymentStatusRunning, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
}
func describeDeployments(node *deploygraph.DeploymentConfigNode, count int) []string { if node == nil { return nil } out := []string{} deployments := node.Deployments if node.ActiveDeployment == nil { on, auto := describeDeploymentConfigTriggers(node.DeploymentConfig) if node.DeploymentConfig.LatestVersion == 0 { out = append(out, fmt.Sprintf("#1 deployment waiting %s", on)) } else if auto { out = append(out, fmt.Sprintf("#%d deployment pending %s", node.DeploymentConfig.LatestVersion, on)) } // TODO: detect new image available? } else { deployments = append([]*kapi.ReplicationController{node.ActiveDeployment}, deployments...) } for i, deployment := range deployments { out = append(out, describeDeploymentStatus(deployment, i == 0)) switch { case count == -1: if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete { return out } default: if i+1 >= count { return out } } } return out }
// TestHandle_cancelNew ensures that handling a cancelled deployment in the
// New phase never creates a deployer pod, and that the deployment is updated
// to the Pending phase.
func TestHandle_cancelNew(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController
	fake := &ktestclient.Fake{}
	// Any attempt to create a deployer pod is a test failure.
	fake.AddReactor("create", "pods", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		t.Fatalf("unexpected call to create pod")
		return true, nil, nil
	})
	// Capture the RC passed to update for the assertion below.
	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
		updatedDeployment = rc
		return true, rc, nil
	})
	// A New deployment that carries the cancellation annotation.
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
	controller := okDeploymentController(fake, deployment, nil, true)
	if err := controller.Handle(deployment); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := deployapi.DeploymentStatusPending, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected deployment status %s, got %s", e, a)
	}
}
// WaitForRunningDeployment waits until the specified deployment is no longer New or Pending. Returns true if
// the deployment became running, complete, or failed within timeout, false if it did not, and an error if any
// other error state occurred. The last observed deployment state is returned.
func WaitForRunningDeployment(rn kclient.ReplicationControllersNamespacer, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
	// Watch only the single RC named observed.Name, resuming from the
	// resource version we already hold.
	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
	w, err := rn.ReplicationControllers(observed.Namespace).Watch(labels.Everything(), fieldSelector, observed.ResourceVersion)
	if err != nil {
		return observed, false, err
	}
	defer w.Stop()
	ch := w.ResultChan()
	// Passing time.After like this (vs receiving directly in a select) will trigger the channel
	// and the timeout will have full effect here.
	expire := time.After(timeout)
	for {
		select {
		case event := <-ch:
			obj, ok := event.Object.(*kapi.ReplicationController)
			if !ok {
				return observed, false, errors.New("received unknown object while watching for deployments")
			}
			observed = obj
			switch deployutil.DeploymentStatusFor(observed) {
			case api.DeploymentStatusRunning, api.DeploymentStatusFailed, api.DeploymentStatusComplete:
				// The deployment has left New/Pending: done waiting.
				return observed, true, nil
			case api.DeploymentStatusNew, api.DeploymentStatusPending:
				// Still waiting; keep consuming watch events.
			default:
				return observed, false, ErrUnknownDeploymentPhase
			}
		case <-expire:
			// Timed out without the deployment leaving New/Pending.
			return observed, false, nil
		}
	}
}
// TestHandle_deployerPodDisappeared ensures that a pending/running deployment
// is failed when its deployer pod vanishes.
func TestHandle_deployerPodDisappeared(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController
	updateCalled := false
	fake := &ktestclient.Fake{}
	// Capture the RC passed to update so the resulting phase can be asserted.
	fake.AddReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		rc := action.(ktestclient.UpdateAction).GetObject().(*kapi.ReplicationController)
		updatedDeployment = rc
		updateCalled = true
		return true, nil, nil
	})
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)
	// nil deployer pod — presumably simulating its disappearance; confirm
	// against okDeploymentController's parameters.
	controller := okDeploymentController(fake, nil, nil, true)
	if err := controller.Handle(deployment); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !updateCalled {
		t.Fatalf("expected update")
	}
	if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected deployment status %s, got %s", e, a)
	}
}
// WaitForRunningDeployment waits until the specified deployment is no longer New or Pending. Returns true if
// the deployment became running, complete, or failed within timeout, false if it did not, and an error if any
// other error state occurred. The last observed deployment state is returned.
func WaitForRunningDeployment(rn kclient.ReplicationControllersNamespacer, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
	// Watch only the single RC named observed.Name, resuming from the
	// resource version we already hold.
	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
	options := kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: observed.ResourceVersion}
	w, err := rn.ReplicationControllers(observed.Namespace).Watch(options)
	if err != nil {
		return observed, false, err
	}
	defer w.Stop()
	// NOTE(review): watch.Until returns an error on expiry (ErrWaitTimeout),
	// so a plain timeout surfaces below as an error rather than the
	// (observed, false, nil) the doc comment describes — confirm intended.
	if _, err := watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for replication controller: %v", e.Object)
		}
		obj, isController := e.Object.(*kapi.ReplicationController)
		if !isController {
			return false, fmt.Errorf("received unknown object while watching for deployments: %v", obj)
		}
		observed = obj
		switch deployutil.DeploymentStatusFor(observed) {
		case api.DeploymentStatusRunning, api.DeploymentStatusFailed, api.DeploymentStatusComplete:
			// The deployment has left New/Pending: stop waiting.
			return true, nil
		case api.DeploymentStatusNew, api.DeploymentStatusPending:
			// Still waiting; keep consuming watch events.
			return false, nil
		default:
			return false, ErrUnknownDeploymentPhase
		}
	}); err != nil {
		return observed, false, err
	}
	return observed, true, nil
}
// deploy launches a new deployment unless there's already a deployment // process in progress for config. func (o DeployOptions) deploy(config *deployapi.DeploymentConfig, out io.Writer) error { if config.Spec.Paused { return fmt.Errorf("cannot deploy a paused deployment config") } // TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently, // that's the case because clients (oc, trigger controllers) are updating the status directly. // Clients should be acting either on spec or on annotations and status updates should be a // responsibility of the main controller. We need to start by unplugging this assumption from // our client tools. deploymentName := deployutil.LatestDeploymentNameForConfig(config) deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName) if err == nil { // Reject attempts to start a concurrent deployment. status := deployutil.DeploymentStatusFor(deployment) if status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed { return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.Status.LatestVersion, status) } } else { if !kerrors.IsNotFound(err) { return err } } config.Status.LatestVersion++ dc, err := o.osClient.DeploymentConfigs(config.Namespace).Update(config) if err != nil { return err } fmt.Fprintf(out, "Started deployment #%d\n", dc.Status.LatestVersion) fmt.Fprintf(out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, dc.Name) return nil }
// findTargetDeployment finds the deployment which is the rollback target by // searching for deployments associated with config. If desiredVersion is >0, // the deployment matching desiredVersion will be returned. If desiredVersion // is <=0, the last completed deployment which is older than the config's // version will be returned. func (o *RollbackOptions) findTargetDeployment(config *deployapi.DeploymentConfig, desiredVersion int64) (*kapi.ReplicationController, error) { // Find deployments for the config sorted by version descending. deployments, err := o.kc.ReplicationControllers(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(config.Name)}) if err != nil { return nil, err } sort.Sort(deployutil.ByLatestVersionDesc(deployments.Items)) // Find the target deployment for rollback. If a version was specified, // use the version for a search. Otherwise, use the last completed // deployment. var target *kapi.ReplicationController for _, deployment := range deployments.Items { version := deployutil.DeploymentVersionFor(&deployment) if desiredVersion > 0 { if version == desiredVersion { target = &deployment break } } else { if version < config.Status.LatestVersion && deployutil.DeploymentStatusFor(&deployment) == deployapi.DeploymentStatusComplete { target = &deployment break } } } if target == nil { return nil, fmt.Errorf("couldn't find deployment for rollback") } return target, nil }
// describeDeployments returns one status line per deployment of the config,
// active deployment first, limited by count; count == -1 means "stop after
// the first complete deployment".
func describeDeployments(f formatter, dcNode *deploygraph.DeploymentConfigNode, activeDeployment *kubegraph.ReplicationControllerNode, inactiveDeployments []*kubegraph.ReplicationControllerNode, count int) []string {
	if dcNode == nil {
		return nil
	}
	out := []string{}
	deploymentsToPrint := append([]*kubegraph.ReplicationControllerNode{}, inactiveDeployments...)
	if activeDeployment == nil {
		// Nothing in flight: describe what would trigger the next deployment.
		on, auto := describeDeploymentConfigTriggers(dcNode.DeploymentConfig)
		if dcNode.DeploymentConfig.Status.LatestVersion == 0 {
			out = append(out, fmt.Sprintf("deployment #1 waiting %s", on))
		} else if auto {
			out = append(out, fmt.Sprintf("deployment #%d pending %s", dcNode.DeploymentConfig.Status.LatestVersion, on))
		}
		// TODO: detect new image available?
	} else {
		deploymentsToPrint = append([]*kubegraph.ReplicationControllerNode{activeDeployment}, inactiveDeployments...)
	}
	for i, deployment := range deploymentsToPrint {
		out = append(out, describeDeploymentStatus(deployment.ReplicationController, i == 0, dcNode.DeploymentConfig.Spec.Test))
		switch {
		case count == -1:
			// NOTE(review): DeploymentStatusFor is passed the graph node here,
			// not deployment.ReplicationController as on the line above —
			// presumably the node satisfies the annotations accessor; confirm.
			if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete {
				return out
			}
		default:
			if i+1 >= count {
				return out
			}
		}
	}
	return out
}
func printDeploymentRc(deployment *kapi.ReplicationController, client deploymentDescriberClient, w io.Writer, header string, verbose bool) error { if len(header) > 0 { fmt.Fprintf(w, "%v:\n", header) } if verbose { fmt.Fprintf(w, "\tName:\t%s\n", deployment.Name) } timeAt := strings.ToLower(formatRelativeTime(deployment.CreationTimestamp.Time)) fmt.Fprintf(w, "\tCreated:\t%s ago\n", timeAt) fmt.Fprintf(w, "\tStatus:\t%s\n", deployutil.DeploymentStatusFor(deployment)) fmt.Fprintf(w, "\tReplicas:\t%d current / %d desired\n", deployment.Status.Replicas, deployment.Spec.Replicas) if verbose { fmt.Fprintf(w, "\tSelector:\t%s\n", formatLabels(deployment.Spec.Selector)) fmt.Fprintf(w, "\tLabels:\t%s\n", formatLabels(deployment.Labels)) running, waiting, succeeded, failed, err := getPodStatusForDeployment(deployment, client) if err != nil { return err } fmt.Fprintf(w, "\tPods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) } return nil }
func describeDeploymentStatus(deploy *kapi.ReplicationController, first, test bool) string { timeAt := strings.ToLower(formatRelativeTime(deploy.CreationTimestamp.Time)) status := deployutil.DeploymentStatusFor(deploy) version := deployutil.DeploymentVersionFor(deploy) maybeCancelling := "" if deployutil.IsDeploymentCancelled(deploy) && !deployutil.IsTerminatedDeployment(deploy) { maybeCancelling = " (cancelling)" } switch status { case deployapi.DeploymentStatusFailed: reason := deployutil.DeploymentStatusReasonFor(deploy) if len(reason) > 0 { reason = fmt.Sprintf(": %s", reason) } // TODO: encode fail time in the rc return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy, false)) case deployapi.DeploymentStatusComplete: // TODO: pod status output if test { return fmt.Sprintf("test deployment #%d deployed %s ago", version, timeAt) } return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy, first)) case deployapi.DeploymentStatusRunning: format := "deployment #%d running%s for %s%s" if test { format = "test deployment #%d running%s for %s%s" } return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy, false)) default: return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy, false)) } }
// retry resets the status of the latest deployment to New, which will cause // the deployment to be retried. An error is returned if the deployment is not // currently in a failed state. func (o DeployOptions) retry(config *deployapi.DeploymentConfig) error { if config.Spec.Paused { return fmt.Errorf("cannot retry a paused deployment config") } if config.Status.LatestVersion == 0 { return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name) } // TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently, // that's the case because clients (oc, trigger controllers) are updating the status directly. // Clients should be acting either on spec or on annotations and status updates should be a // responsibility of the main controller. We need to start by unplugging this assumption from // our client tools. deploymentName := deployutil.LatestDeploymentNameForConfig(config) deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName) if err != nil { if kerrors.IsNotFound(err) { return fmt.Errorf("unable to find the latest deployment (#%d).\nYou can start a new deployment using the --latest option.", config.Status.LatestVersion) } return err } if status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed { message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.Status.LatestVersion, status) if status == deployapi.DeploymentStatusComplete { message += "You can start a new deployment using the --latest option." } else { message += "Optionally, you can cancel this deployment using the --cancel option." 
} return fmt.Errorf(message) } // Delete the deployer pod as well as the deployment hooks pods, if any pods, err := o.kubeClient.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)}) if err != nil { return fmt.Errorf("failed to list deployer/hook pods for deployment #%d: %v", config.Status.LatestVersion, err) } for _, pod := range pods.Items { err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0)) if err != nil { return fmt.Errorf("failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.Status.LatestVersion, err) } } deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew) // clear out the cancellation flag as well as any previous status-reason annotation delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation) delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation) _, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment) if err != nil { return err } fmt.Fprintf(o.out, "Retried #%d\n", config.Status.LatestVersion) if o.follow { return o.getLogs(config) } fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, config.Name) return nil }
func (o RolloutLatestOptions) RunRolloutLatest() error { info := o.infos[0] config, ok := info.Object.(*deployapi.DeploymentConfig) if !ok { return fmt.Errorf("%s is not a deployment config", info.Name) } // TODO: Consider allowing one-off deployments for paused configs // See https://github.com/openshift/origin/issues/9903 if config.Spec.Paused { return fmt.Errorf("cannot deploy a paused deployment config") } deploymentName := deployutil.LatestDeploymentNameForConfig(config) deployment, err := o.kc.ReplicationControllers(config.Namespace).Get(deploymentName) switch { case err == nil: // Reject attempts to start a concurrent deployment. if !deployutil.IsTerminatedDeployment(deployment) { status := deployutil.DeploymentStatusFor(deployment) return fmt.Errorf("#%d is already in progress (%s).", config.Status.LatestVersion, status) } case !kerrors.IsNotFound(err): return err } dc := config if !o.DryRun { request := &deployapi.DeploymentRequest{ Name: config.Name, Latest: !o.again, Force: true, } dc, err = o.oc.DeploymentConfigs(config.Namespace).Instantiate(request) // Pre 1.4 servers don't support the instantiate endpoint. Fallback to incrementing // latestVersion on them. if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) { config.Status.LatestVersion++ dc, err = o.oc.DeploymentConfigs(config.Namespace).Update(config) } if err != nil { return err } info.Refresh(dc, true) } if o.output == "revision" { fmt.Fprintf(o.out, fmt.Sprintf("%d", dc.Status.LatestVersion)) return nil } kcmdutil.PrintSuccess(o.mapper, o.output == "name", o.out, info.Mapping.Resource, info.Name, o.DryRun, "rolled out") return nil }
// ViewHistory returns a description of all the history it can find for a deployment config. func (h *DeploymentConfigHistoryViewer) ViewHistory(namespace, name string, revision int64) (string, error) { opts := kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(name)} deploymentList, err := h.rn.ReplicationControllers(namespace).List(opts) if err != nil { return "", err } if len(deploymentList.Items) == 0 { return "No rollout history found.", nil } items := deploymentList.Items history := make([]*kapi.ReplicationController, 0, len(items)) for i := range items { history = append(history, &items[i]) } // Print details of a specific revision if revision > 0 { var desired *kapi.PodTemplateSpec // We could use a binary search here but brute-force is always faster to write for i := range history { rc := history[i] if deployutil.DeploymentVersionFor(rc) == revision { desired = rc.Spec.Template break } } if desired == nil { return "", fmt.Errorf("unable to find the specified revision") } buf := bytes.NewBuffer([]byte{}) kubectl.DescribePodTemplate(desired, buf) return buf.String(), nil } sort.Sort(deployutil.ByLatestVersionAsc(history)) return tabbedString(func(out *tabwriter.Writer) error { fmt.Fprintf(out, "REVISION\tSTATUS\tCAUSE\n") for i := range history { rc := history[i] rev := deployutil.DeploymentVersionFor(rc) status := deployutil.DeploymentStatusFor(rc) cause := rc.Annotations[deployapi.DeploymentStatusReasonAnnotation] if len(cause) == 0 { cause = "<unknown>" } fmt.Fprintf(out, "%d\t%s\t%s\n", rev, status, cause) } return nil }) }
// TestHandle_canceledDeploymentTriggerTest ensures that a canceled deployment
// will trigger a reconciliation of its deploymentconfig (via an annotation
// update) so that rolling back can happen on the spot and not rely on the
// deploymentconfig cache resync interval.
func TestHandle_canceledDeploymentTriggerTest(t *testing.T) {
	var (
		updatedDeployment *kapi.ReplicationController
		updatedConfig     *deployapi.DeploymentConfig
	)
	initial := deploytest.OkDeploymentConfig(1)
	// Canceled deployment
	deployment, _ := deployutil.MakeDeployment(deploytest.TestDeploymentConfig(initial), kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))
	deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
	kFake := &ktestclient.Fake{}
	// Serve and capture the RC; capture the config on update so both the
	// deployment-status and config-reconciliation effects can be asserted.
	kFake.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		return true, deployment, nil
	})
	kFake.PrependReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		updatedDeployment = deployment
		return true, deployment, nil
	})
	fake := &testclient.Fake{}
	fake.PrependReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		config := initial
		return true, config, nil
	})
	fake.PrependReactor("update", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		updated := action.(ktestclient.UpdateAction).GetObject().(*deployapi.DeploymentConfig)
		updatedConfig = updated
		return true, updated, nil
	})
	controller := &DeployerPodController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, kapi.Codecs.UniversalDecoder())
		},
		store:   cache.NewStore(cache.MetaNamespaceKeyFunc),
		client:  fake,
		kClient: kFake,
	}
	// A terminated deployer pod for a cancelled deployment should fail the
	// deployment and touch the config.
	err := controller.Handle(terminatedPod(deployment))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}
	if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
	if updatedConfig == nil {
		t.Fatalf("expected config update")
	}
}
// TestHandle_deploymentCleanupTransientError ensures that a failure // to clean up a failed deployment results in a transient error // and the deployment status is not set to Failed. func TestHandle_deploymentCleanupTransientError(t *testing.T) { completedDeployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codec) completedDeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete) currentDeployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(2), kapi.Codec) currentDeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning) currentDeployment.Annotations[deployapi.DesiredReplicasAnnotation] = "2" controller := &DeployerPodController{ deploymentClient: &deploymentClientImpl{ getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) { return currentDeployment, nil }, updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) { // simulate failure ONLY for the completed deployment if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete { return nil, fmt.Errorf("test failure in updating completed deployment") } return deployment, nil }, listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) { return &kapi.ReplicationControllerList{Items: []kapi.ReplicationController{*currentDeployment, *completedDeployment}}, nil }, }, } err := controller.Handle(terminatedPod(currentDeployment)) if err == nil { t.Fatalf("unexpected error: %v", err) } if _, isTransient := err.(transientError); !isTransient { t.Fatalf("expected transientError on failure to update deployment") } if e, a := deployapi.DeploymentStatusRunning, deployutil.DeploymentStatusFor(currentDeployment); e != a { t.Fatalf("expected updated deployment status to remain %s, got %s", e, a) } }
// updateConditions recomputes the Available and Progressing conditions on
// newStatus from the current replica counts, the phase of the latest RC, and
// the config's paused flag.
func updateConditions(config *deployapi.DeploymentConfig, newStatus *deployapi.DeploymentConfigStatus, latestRC *kapi.ReplicationController) {
	// Availability condition.
	if newStatus.AvailableReplicas >= config.Spec.Replicas-deployutil.MaxUnavailable(*config) {
		minAvailability := deployutil.NewDeploymentCondition(deployapi.DeploymentAvailable, kapi.ConditionTrue, "", "Deployment config has minimum availability.")
		deployutil.SetDeploymentCondition(newStatus, *minAvailability)
	} else {
		noMinAvailability := deployutil.NewDeploymentCondition(deployapi.DeploymentAvailable, kapi.ConditionFalse, "", "Deployment config does not have minimum availability.")
		deployutil.SetDeploymentCondition(newStatus, *noMinAvailability)
	}
	// Condition about progress.
	cond := deployutil.GetDeploymentCondition(*newStatus, deployapi.DeploymentProgressing)
	if latestRC != nil {
		switch deployutil.DeploymentStatusFor(latestRC) {
		case deployapi.DeploymentStatusNew, deployapi.DeploymentStatusPending:
			msg := fmt.Sprintf("Waiting on deployer pod for %q to be scheduled", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionUnknown, "", msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		case deployapi.DeploymentStatusRunning:
			msg := fmt.Sprintf("Replication controller %q is progressing", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionTrue, deployutil.ReplicationControllerUpdatedReason, msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		case deployapi.DeploymentStatusFailed:
			// Keep an existing timed-out condition rather than overwriting it.
			if cond != nil && cond.Reason == deployutil.TimedOutReason {
				break
			}
			msg := fmt.Sprintf("Replication controller %q has failed progressing", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionFalse, deployutil.TimedOutReason, msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		case deployapi.DeploymentStatusComplete:
			// Keep an existing available condition rather than overwriting it.
			if cond != nil && cond.Reason == deployutil.NewRcAvailableReason {
				break
			}
			msg := fmt.Sprintf("Replication controller %q has completed progressing", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionTrue, deployutil.NewRcAvailableReason, msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		}
	}
	// Pause / resume condition. Since we don't pause running deployments, let's use paused conditions only when a deployment
	// actually terminates. For now it may be ok to override lack of progress in the conditions, later we may want to separate
	// paused from the rest of the progressing conditions.
	if latestRC == nil || deployutil.IsTerminatedDeployment(latestRC) {
		pausedCondExists := cond != nil && cond.Reason == deployutil.PausedDeployReason
		if config.Spec.Paused && !pausedCondExists {
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionUnknown, deployutil.PausedDeployReason, "Deployment config is paused")
			deployutil.SetDeploymentCondition(newStatus, *condition)
		} else if !config.Spec.Paused && pausedCondExists {
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionUnknown, deployutil.ResumedDeployReason, "Deployment config is resumed")
			deployutil.SetDeploymentCondition(newStatus, *condition)
		}
	}
}
// TestHandle_deployerPodDisappeared ensures that a pending/running deployment
// is failed when its deployer pod vanishes.
func TestHandle_deployerPodDisappeared(t *testing.T) {
	// Both non-terminal phases must transition to Failed.
	tests := []struct {
		name  string
		phase deployapi.DeploymentStatus
	}{
		{
			name:  "pending",
			phase: deployapi.DeploymentStatusPending,
		},
		{
			name:  "running",
			phase: deployapi.DeploymentStatusRunning,
		},
	}
	for _, test := range tests {
		var updatedDeployment *kapi.ReplicationController
		updateCalled := false
		client := &fake.Clientset{}
		// Capture the RC passed to update so its phase can be asserted.
		client.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			rc := action.(core.UpdateAction).GetObject().(*kapi.ReplicationController)
			updatedDeployment = rc
			updateCalled = true
			return true, nil, nil
		})
		deployment, err := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), codec)
		if err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(test.phase)
		// nil deployer pod plus PodUnknown — presumably simulating the pod
		// having vanished; confirm against okDeploymentController's parameters.
		controller := okDeploymentController(client, nil, nil, true, kapi.PodUnknown)
		if err := controller.Handle(deployment); err != nil {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		if !updateCalled {
			t.Errorf("%s: expected update", test.name)
			continue
		}
		if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
			t.Errorf("%s: expected deployment status %q, got %q", test.name, e, a)
		}
	}
}
// TestHandle_unrelatedPodAlreadyExists ensures that attempts to create a
// deployer pod, when a pod with the same name but missing annotations results
// a transition to failed.
func TestHandle_unrelatedPodAlreadyExists(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController

	// A New deployment whose deployer pod name is already taken by an
	// unrelated pod (same name, no deployment annotations).
	config := deploytest.OkDeploymentConfig(1)
	deployment, _ := deployutil.MakeDeployment(config, kapi.Codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	otherPod := unrelatedPod(deployment)

	controller := &DeploymentController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, api.Codec)
		},
		deploymentClient: &deploymentClientImpl{
			// Capture the deployment update so assertions below can inspect it.
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				return updatedDeployment, nil
			},
		},
		podClient: &podClientImpl{
			// Lookups find the unrelated pod; creation reports a name clash.
			getPodFunc: func(namespace, name string) (*kapi.Pod, error) {
				return otherPod, nil
			},
			createPodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				return nil, kerrors.NewAlreadyExists("Pod", pod.Name)
			},
		},
		makeContainer: func(strategy *deployapi.DeploymentStrategy) (*kapi.Container, error) {
			return okContainer(), nil
		},
		recorder: &record.FakeRecorder{},
	}

	err := controller.Handle(deployment)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// The clashing pod is not ours, so the deployment must not be annotated
	// with its name, and the status must transition to Failed with the
	// "unrelated deployment exists" reason.
	if _, exists := updatedDeployment.Annotations[deployapi.DeploymentPodAnnotation]; exists {
		t.Fatalf("deployment updated with pod name annotation")
	}
	if e, a := deployapi.DeploymentFailedUnrelatedDeploymentExists, deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation]; e != a {
		t.Errorf("expected reason annotation %s, got %s", e, a)
	}
	if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Errorf("expected deployment status %s, got %s", e, a)
	}
}
// TestHandle_cleanupNewWithDeployers ensures that handling a cancelled New
// deployment deletes its existing deployer pods (instead of creating a new
// one) and moves the deployment to Pending.
func TestHandle_cleanupNewWithDeployers(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController
	deletedDeployer := false

	// A New deployment that has already been cancelled by the user.
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue

	controller := &DeploymentController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))
		},
		deploymentClient: &deploymentClientImpl{
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				return updatedDeployment, nil
			},
		},
		podClient: &podClientImpl{
			// A cancelled deployment must never spawn a new deployer pod.
			createPodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
				t.Fatalf("unexpected call to make container")
				return nil, nil
			},
			// One related deployer pod exists and should be cleaned up.
			getDeployerPodsForFunc: func(namespace, name string) ([]kapi.Pod, error) {
				return []kapi.Pod{*relatedPod(deployment)}, nil
			},
			deletePodFunc: func(namespace, name string) error {
				deletedDeployer = true
				return nil
			},
		},
		makeContainer: func(strategy *deployapi.DeploymentStrategy) *kapi.Container {
			return okContainer()
		},
		recorder: &record.FakeRecorder{},
	}

	err := controller.Handle(deployment)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if e, a := deployapi.DeploymentStatusPending, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected deployment status %s, got %s", e, a)
	}
	if !deletedDeployer {
		t.Fatalf("expected deployer delete")
	}
}
// deploy launches a new deployment unless there's already a deployment // process in progress for config. func (o DeployOptions) deploy(config *deployapi.DeploymentConfig) error { if config.Spec.Paused { return fmt.Errorf("cannot deploy a paused deployment config") } // TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently, // that's the case because clients (oc, trigger controllers) are updating the status directly. // Clients should be acting either on spec or on annotations and status updates should be a // responsibility of the main controller. We need to start by unplugging this assumption from // our client tools. deploymentName := deployutil.LatestDeploymentNameForConfig(config) deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName) if err == nil && !deployutil.IsTerminatedDeployment(deployment) { // Reject attempts to start a concurrent deployment. return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.Status.LatestVersion, deployutil.DeploymentStatusFor(deployment)) } if err != nil && !kerrors.IsNotFound(err) { return err } request := &deployapi.DeploymentRequest{ Name: config.Name, Latest: false, Force: true, } dc, err := o.osClient.DeploymentConfigs(config.Namespace).Instantiate(request) // Pre 1.4 servers don't support the instantiate endpoint. Fallback to incrementing // latestVersion on them. if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) { config.Status.LatestVersion++ dc, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config) } if err != nil { if kerrors.IsBadRequest(err) { err = fmt.Errorf("%v - try 'oc rollout latest dc/%s'", err, config.Name) } return err } fmt.Fprintf(o.out, "Started deployment #%d\n", dc.Status.LatestVersion) if o.follow { return o.getLogs(dc) } fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, dc.Name) return nil }
// retry resets the status of the latest deployment to New, which will cause // the deployment to be retried. An error is returned if the deployment is not // currently in a failed state. func (o *DeployOptions) retry(config *deployapi.DeploymentConfig, out io.Writer) error { if config.LatestVersion == 0 { return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name) } deploymentName := deployutil.LatestDeploymentNameForConfig(config) deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName) if err != nil { if kerrors.IsNotFound(err) { return fmt.Errorf("Unable to find the latest deployment (#%d).\nYou can start a new deployment using the --latest option.", config.LatestVersion) } return err } if status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed { message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.LatestVersion, status) if status == deployapi.DeploymentStatusComplete { message += "You can start a new deployment using the --latest option." } else { message += "Optionally, you can cancel this deployment using the --cancel option." 
} return fmt.Errorf(message) } // Delete the deployer pod as well as the deployment hooks pods, if any pods, err := o.kubeClient.Pods(config.Namespace).List(deployutil.DeployerPodSelector(deploymentName), fields.Everything()) if err != nil { return fmt.Errorf("Failed to list deployer/hook pods for deployment #%d: %v", config.LatestVersion, err) } for _, pod := range pods.Items { err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0)) if err != nil { return fmt.Errorf("Failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.LatestVersion, err) } } deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew) // clear out the cancellation flag as well as any previous status-reason annotation delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation) delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation) _, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment) if err == nil { fmt.Fprintf(out, "retried #%d\n", config.LatestVersion) } return err }
func (p *deploymentDeleter) DeleteDeployment(deployment *kapi.ReplicationController) error { glog.V(4).Infof("Deleting deployment %q", deployment.Name) // If the deployment is failed we need to remove its deployer pods, too. if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusFailed { dpSelector := deployutil.DeployerPodSelector(deployment.Name) deployers, err := p.pods.Pods(deployment.Namespace).List(kapi.ListOptions{LabelSelector: dpSelector}) if err != nil { glog.Warning("Cannot list deployer pods for %q: %v\n", deployment.Name, err) } else { for _, pod := range deployers.Items { if err := p.pods.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { glog.Warning("Cannot remove deployer pod %q: %v\n", pod.Name, err) } } } } return p.deployments.ReplicationControllers(deployment.Namespace).Delete(deployment.Name) }
// TestHandle_podTerminatedFailNoContainerStatus ensures that a failed
// deployer pod with no container status results in a transition of the
// deployment's status to failed.
func TestHandle_podTerminatedFailNoContainerStatusTest(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController
	deployment, _ := deployutil.MakeDeployment(deploytest.TestDeploymentConfig(deploytest.OkDeploymentConfig(1)), kapi.Codec)
	deployment.Spec.Replicas = 1
	// since we do not set the desired replicas annotation,
	// this also tests that the error is just logged and not result in a failure
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)

	controller := &DeployerPodController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, kapi.Codec)
		},
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return deployment, nil
			},
			// Capture the update so the resulting status/replicas can be
			// asserted below.
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				return deployment, nil
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return &kapi.ReplicationControllerList{Items: []kapi.ReplicationController{*deployment}}, nil
			},
		},
	}

	// Feed the controller a terminated deployer pod for the deployment.
	err := controller.Handle(terminatedPod(deployment))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}
	if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
	// The failed deployment should be scaled down to zero replicas.
	if e, a := 0, updatedDeployment.Spec.Replicas; e != a {
		t.Fatalf("expected updated deployment replicas to be %d, got %d", e, a)
	}
}
// Resolve the matching set of objects func (o *orphanDeploymentResolver) Resolve() ([]*kapi.ReplicationController, error) { deployments, err := o.dataSet.ListDeployments() if err != nil { return nil, err } results := []*kapi.ReplicationController{} for _, deployment := range deployments { deploymentStatus := deployutil.DeploymentStatusFor(deployment) if !o.deploymentStatusFilter.Has(string(deploymentStatus)) { continue } _, exists, _ := o.dataSet.GetDeploymentConfig(deployment) if !exists { results = append(results, deployment) } } return results, nil }
// cancel cancels any deployment process in progress for config. func (o *DeployOptions) cancel(config *deployapi.DeploymentConfig, out io.Writer) error { deployments, err := o.kubeClient.ReplicationControllers(config.Namespace).List(deployutil.ConfigSelector(config.Name)) if err != nil { return err } if len(deployments.Items) == 0 { fmt.Fprintln(out, "no deployments found to cancel") return nil } failedCancellations := []string{} anyCancelled := false for _, deployment := range deployments.Items { status := deployutil.DeploymentStatusFor(&deployment) switch status { case deployapi.DeploymentStatusNew, deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning: if deployutil.IsDeploymentCancelled(&deployment) { continue } deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser _, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(&deployment) if err == nil { fmt.Fprintf(out, "cancelled deployment #%d\n", config.LatestVersion) anyCancelled = true } else { fmt.Fprintf(out, "couldn't cancel deployment #%d (status: %s): %v\n", deployutil.DeploymentVersionFor(&deployment), status, err) failedCancellations = append(failedCancellations, strconv.Itoa(deployutil.DeploymentVersionFor(&deployment))) } } } if len(failedCancellations) > 0 { return fmt.Errorf("couldn't cancel deployment %s", strings.Join(failedCancellations, ", ")) } if !anyCancelled { fmt.Fprintln(out, "no active deployments to cancel") } return nil }
// TestHandle_deployerPodDisappeared ensures that a pending/running deployment
// is failed when its deployer pod vanishes.
func TestHandle_deployerPodDisappeared(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController
	updateCalled := false
	controller := &DeploymentController{
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, api.Codec)
		},
		deploymentClient: &deploymentClientImpl{
			// Capture the update so the resulting status can be asserted.
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				updateCalled = true
				return updatedDeployment, nil
			},
		},
		podClient: &podClientImpl{
			// Simulate the deployer pod having vanished.
			getPodFunc: func(namespace, name string) (*kapi.Pod, error) {
				return nil, kerrors.NewNotFound("Pod", name)
			},
		},
		makeContainer: func(strategy *deployapi.DeploymentStrategy) (*kapi.Container, error) {
			return okContainer(), nil
		},
		recorder: &record.FakeRecorder{},
	}

	// A Running deployment whose deployer pod can no longer be found.
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)
	err := controller.Handle(deployment)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !updateCalled {
		t.Fatalf("expected update")
	}
	if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected deployment status %s, got %s", e, a)
	}
}