Example #1
func describeDeploymentStatus(deploy *kapi.ReplicationController, first, test bool) string {
	timeAt := strings.ToLower(formatRelativeTime(deploy.CreationTimestamp.Time))
	status := deployutil.DeploymentStatusFor(deploy)
	version := deployutil.DeploymentVersionFor(deploy)
	maybeCancelling := ""
	if deployutil.IsDeploymentCancelled(deploy) && !deployutil.IsTerminatedDeployment(deploy) {
		maybeCancelling = " (cancelling)"
	}

	switch status {
	case deployapi.DeploymentStatusFailed:
		reason := deployutil.DeploymentStatusReasonFor(deploy)
		if len(reason) > 0 {
			reason = fmt.Sprintf(": %s", reason)
		}
		// TODO: encode fail time in the rc
		return fmt.Sprintf("deployment #%d failed %s ago%s%s", version, timeAt, reason, describePodSummaryInline(deploy, false))
	case deployapi.DeploymentStatusComplete:
		// TODO: pod status output
		if test {
			return fmt.Sprintf("test deployment #%d deployed %s ago", version, timeAt)
		}
		return fmt.Sprintf("deployment #%d deployed %s ago%s", version, timeAt, describePodSummaryInline(deploy, first))
	case deployapi.DeploymentStatusRunning:
		format := "deployment #%d running%s for %s%s"
		if test {
			format = "test deployment #%d running%s for %s%s"
		}
		return fmt.Sprintf(format, version, maybeCancelling, timeAt, describePodSummaryInline(deploy, false))
	default:
		return fmt.Sprintf("deployment #%d %s%s %s ago%s", version, strings.ToLower(string(status)), maybeCancelling, timeAt, describePodSummaryInline(deploy, false))
	}
}
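Every snippet in this listing leans on deployutil.IsTerminatedDeployment to decide whether a deployment (backed by a ReplicationController) has reached a terminal outcome. A minimal self-contained sketch of that check, using a stand-in annotation key and the status values that appear throughout these examples (both are assumptions, not the verbatim OpenShift definitions):

package main

import "fmt"

// deploymentStatusAnnotation is a stand-in for deployapi.DeploymentStatusAnnotation.
const deploymentStatusAnnotation = "openshift.io/deployment.phase"

type deploymentStatus string

const (
	statusNew      deploymentStatus = "New"
	statusPending  deploymentStatus = "Pending"
	statusRunning  deploymentStatus = "Running"
	statusComplete deploymentStatus = "Complete"
	statusFailed   deploymentStatus = "Failed"
)

// isTerminated mirrors how the examples use deployutil.IsTerminatedDeployment:
// only Complete and Failed count as terminal outcomes.
func isTerminated(annotations map[string]string) bool {
	s := deploymentStatus(annotations[deploymentStatusAnnotation])
	return s == statusComplete || s == statusFailed
}

func main() {
	rc := map[string]string{deploymentStatusAnnotation: string(statusRunning)}
	fmt.Println(isTerminated(rc)) // false: the deployment is still in flight
}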
Example #2
func (o RolloutLatestOptions) RunRolloutLatest() error {
	info := o.infos[0]
	config, ok := info.Object.(*deployapi.DeploymentConfig)
	if !ok {
		return fmt.Errorf("%s is not a deployment config", info.Name)
	}

	// TODO: Consider allowing one-off deployments for paused configs
	// See https://github.com/openshift/origin/issues/9903
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}

	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kc.ReplicationControllers(config.Namespace).Get(deploymentName)
	switch {
	case err == nil:
		// Reject attempts to start a concurrent deployment.
		if !deployutil.IsTerminatedDeployment(deployment) {
			status := deployutil.DeploymentStatusFor(deployment)
			return fmt.Errorf("#%d is already in progress (%s).", config.Status.LatestVersion, status)
		}
	case !kerrors.IsNotFound(err):
		return err
	}

	dc := config
	if !o.DryRun {
		request := &deployapi.DeploymentRequest{
			Name:   config.Name,
			Latest: !o.again,
			Force:  true,
		}

		dc, err = o.oc.DeploymentConfigs(config.Namespace).Instantiate(request)

		// Pre 1.4 servers don't support the instantiate endpoint. Fallback to incrementing
		// latestVersion on them.
		if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
			config.Status.LatestVersion++
			dc, err = o.oc.DeploymentConfigs(config.Namespace).Update(config)
		}

		if err != nil {
			return err
		}

		info.Refresh(dc, true)
	}

	if o.output == "revision" {
		fmt.Fprintf(o.out, "%d", dc.Status.LatestVersion)
		return nil
	}

	kcmdutil.PrintSuccess(o.mapper, o.output == "name", o.out, info.Mapping.Resource, info.Name, o.DryRun, "rolled out")
	return nil
}
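The fallback above (also used in Example #5) is a general pattern for coping with older servers: call the newer endpoint first and, if the server responds NotFound or Forbidden, drop back to the legacy behavior. A stripped-down sketch of the pattern with hypothetical stand-in clients:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the kerrors.IsNotFound / kerrors.IsForbidden
// checks in the example; a real client would inspect the API status code.
var errNotFound = errors.New("the server could not find the requested resource")

// instantiate and legacyBump are hypothetical stand-ins for
// oc.DeploymentConfigs(ns).Instantiate and the client-side
// latestVersion increment followed by Update.
func instantiate(name string) (string, error) { return "", errNotFound }
func legacyBump(name string) (string, error) {
	return name + " (latestVersion bumped client-side)", nil
}

func rollout(name string) (string, error) {
	dc, err := instantiate(name)
	// Pre-1.4 servers don't expose the instantiate endpoint;
	// fall back to the legacy increment.
	if errors.Is(err, errNotFound) {
		dc, err = legacyBump(name)
	}
	return dc, err
}

func main() {
	fmt.Println(rollout("frontend"))
}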
Example #3
// cancel cancels any deployment process in progress for config.
func (o DeployOptions) cancel(config *deployapi.DeploymentConfig) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot cancel a paused deployment config")
	}
	deployments, err := o.kubeClient.ReplicationControllers(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(config.Name)})
	if err != nil {
		return err
	}
	if len(deployments.Items) == 0 {
		fmt.Fprintf(o.out, "There have been no deployments for %s/%s\n", config.Namespace, config.Name)
		return nil
	}
	sort.Sort(deployutil.ByLatestVersionDesc(deployments.Items))
	failedCancellations := []string{}
	anyCancelled := false
	for _, deployment := range deployments.Items {
		status := deployutil.DeploymentStatusFor(&deployment)
		switch status {
		case deployapi.DeploymentStatusNew,
			deployapi.DeploymentStatusPending,
			deployapi.DeploymentStatusRunning:

			if deployutil.IsDeploymentCancelled(&deployment) {
				continue
			}

			deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
			deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser
			_, err := o.kubeClient.ReplicationControllers(deployment.Namespace).Update(&deployment)
			if err == nil {
				fmt.Fprintf(o.out, "Cancelled deployment #%d\n", config.Status.LatestVersion)
				anyCancelled = true
			} else {
				fmt.Fprintf(o.out, "Couldn't cancel deployment #%d (status: %s): %v\n", deployutil.DeploymentVersionFor(&deployment), status, err)
				failedCancellations = append(failedCancellations, strconv.FormatInt(deployutil.DeploymentVersionFor(&deployment), 10))
			}
		}
	}
	if len(failedCancellations) > 0 {
		return fmt.Errorf("couldn't cancel deployment %s", strings.Join(failedCancellations, ", "))
	}
	if !anyCancelled {
		latest := &deployments.Items[0]
		maybeCancelling := ""
		if deployutil.IsDeploymentCancelled(latest) && !deployutil.IsTerminatedDeployment(latest) {
			maybeCancelling = " (cancelling)"
		}
		timeAt := strings.ToLower(units.HumanDuration(time.Since(latest.CreationTimestamp.Time)))
		fmt.Fprintf(o.out, "No deployments are in progress (latest deployment #%d %s%s %s ago)\n",
			deployutil.DeploymentVersionFor(latest),
			strings.ToLower(string(deployutil.DeploymentStatusFor(latest))),
			maybeCancelling,
			timeAt)
	}
	return nil
}
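Cancellation here is annotation-driven: the command stamps two annotations on the ReplicationController and lets the deployment controllers react to them. A tiny sketch of the marking step, using assumed stand-ins for the deployapi annotation constants:

package main

import "fmt"

// Assumed stand-ins for deployapi.DeploymentCancelledAnnotation and friends.
const (
	cancelledAnnotation      = "openshift.io/deployment.cancelled"
	cancelledAnnotationValue = "true"
	statusReasonAnnotation   = "openshift.io/deployment.status-reason"
	cancelledByUser          = "cancelled by the user"
)

// markCancelled applies the same two annotations the example writes before
// calling Update on the ReplicationController.
func markCancelled(annotations map[string]string) {
	annotations[cancelledAnnotation] = cancelledAnnotationValue
	annotations[statusReasonAnnotation] = cancelledByUser
}

func main() {
	rc := map[string]string{}
	markCancelled(rc)
	fmt.Println(rc)
}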
Example #4
func updateConditions(config *deployapi.DeploymentConfig, newStatus *deployapi.DeploymentConfigStatus, latestRC *kapi.ReplicationController) {
	// Availability condition.
	if newStatus.AvailableReplicas >= config.Spec.Replicas-deployutil.MaxUnavailable(*config) {
		minAvailability := deployutil.NewDeploymentCondition(deployapi.DeploymentAvailable, kapi.ConditionTrue, "", "Deployment config has minimum availability.")
		deployutil.SetDeploymentCondition(newStatus, *minAvailability)
	} else {
		noMinAvailability := deployutil.NewDeploymentCondition(deployapi.DeploymentAvailable, kapi.ConditionFalse, "", "Deployment config does not have minimum availability.")
		deployutil.SetDeploymentCondition(newStatus, *noMinAvailability)
	}
	// Condition about progress.
	cond := deployutil.GetDeploymentCondition(*newStatus, deployapi.DeploymentProgressing)
	if latestRC != nil {
		switch deployutil.DeploymentStatusFor(latestRC) {
		case deployapi.DeploymentStatusNew, deployapi.DeploymentStatusPending:
			msg := fmt.Sprintf("Waiting on deployer pod for %q to be scheduled", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionUnknown, "", msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		case deployapi.DeploymentStatusRunning:
			msg := fmt.Sprintf("Replication controller %q is progressing", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionTrue, deployutil.ReplicationControllerUpdatedReason, msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		case deployapi.DeploymentStatusFailed:
			if cond != nil && cond.Reason == deployutil.TimedOutReason {
				break
			}
			msg := fmt.Sprintf("Replication controller %q has failed progressing", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionFalse, deployutil.TimedOutReason, msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		case deployapi.DeploymentStatusComplete:
			if cond != nil && cond.Reason == deployutil.NewRcAvailableReason {
				break
			}
			msg := fmt.Sprintf("Replication controller %q has completed progressing", latestRC.Name)
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionTrue, deployutil.NewRcAvailableReason, msg)
			deployutil.SetDeploymentCondition(newStatus, *condition)
		}
	}
	// Pause / resume condition. Since we don't pause running deployments, use paused conditions only when a deployment
	// actually terminates. For now it may be OK to override lack of progress in the conditions; later we may want to separate
	// paused from the rest of the progressing conditions.
	if latestRC == nil || deployutil.IsTerminatedDeployment(latestRC) {
		pausedCondExists := cond != nil && cond.Reason == deployutil.PausedDeployReason
		if config.Spec.Paused && !pausedCondExists {
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionUnknown, deployutil.PausedDeployReason, "Deployment config is paused")
			deployutil.SetDeploymentCondition(newStatus, *condition)
		} else if !config.Spec.Paused && pausedCondExists {
			condition := deployutil.NewDeploymentCondition(deployapi.DeploymentProgressing, kapi.ConditionUnknown, deployutil.ResumedDeployReason, "Deployment config is resumed")
			deployutil.SetDeploymentCondition(newStatus, *condition)
		}
	}
}
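The condition handling above only works if setting a condition replaces any existing condition of the same type instead of appending blindly. A sketch of that invariant as this example appears to rely on it (an assumption about deployutil.SetDeploymentCondition, not its verbatim source):

package main

import "fmt"

type conditionType string

type condition struct {
	Type    conditionType
	Status  string
	Reason  string
	Message string
}

// setCondition keeps at most one condition per type: replace in place if
// the type already exists, otherwise append.
func setCondition(conds []condition, c condition) []condition {
	for i := range conds {
		if conds[i].Type == c.Type {
			conds[i] = c
			return conds
		}
	}
	return append(conds, c)
}

func main() {
	conds := setCondition(nil, condition{Type: "Progressing", Status: "Unknown"})
	conds = setCondition(conds, condition{Type: "Progressing", Status: "True", Reason: "NewRcAvailable"})
	fmt.Printf("%+v\n", conds) // still one Progressing condition; the latest write wins
}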
Example #5
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (o DeployOptions) deploy(config *deployapi.DeploymentConfig) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}
	// TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently,
	// that's the case because clients (oc, trigger controllers) are updating the status directly.
	// Clients should be acting either on spec or on annotations and status updates should be a
	// responsibility of the main controller. We need to start by unplugging this assumption from
	// our client tools.
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err == nil && !deployutil.IsTerminatedDeployment(deployment) {
		// Reject attempts to start a concurrent deployment.
		return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.",
			config.Status.LatestVersion, deployutil.DeploymentStatusFor(deployment))
	}
	if err != nil && !kerrors.IsNotFound(err) {
		return err
	}

	request := &deployapi.DeploymentRequest{
		Name:   config.Name,
		Latest: false,
		Force:  true,
	}

	dc, err := o.osClient.DeploymentConfigs(config.Namespace).Instantiate(request)
	// Pre 1.4 servers don't support the instantiate endpoint. Fallback to incrementing
	// latestVersion on them.
	if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
		config.Status.LatestVersion++
		dc, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config)
	}
	if err != nil {
		if kerrors.IsBadRequest(err) {
			err = fmt.Errorf("%v - try 'oc rollout latest dc/%s'", err, config.Name)
		}
		return err
	}
	fmt.Fprintf(o.out, "Started deployment #%d\n", dc.Status.LatestVersion)
	if o.follow {
		return o.getLogs(dc)
	}
	fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, dc.Name)
	return nil
}
Example #6
func (c *DeploymentConfigController) Handle(config *deployapi.DeploymentConfig) error {
	// There's nothing to reconcile until the version is nonzero.
	if config.Status.LatestVersion == 0 {
		glog.V(5).Infof("Waiting for first version of %q", deployutil.LabelForDeploymentConfig(config))
		return c.updateStatus(config)
	}

	// Find all deployments owned by the deploymentConfig.
	sel := deployutil.ConfigSelector(config.Name)
	existingDeployments, err := c.kubeClient.ReplicationControllers(config.Namespace).List(kapi.ListOptions{LabelSelector: sel})
	if err != nil {
		return err
	}

	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	// If the latest deployment doesn't exist yet, cancel any running
	// deployments to allow them to be superseded by the new config version.
	awaitingCancellations := false
	if !latestIsDeployed {
		for _, deployment := range existingDeployments.Items {
			// Skip deployments with an outcome.
			if deployutil.IsTerminatedDeployment(&deployment) {
				continue
			}
			// Cancel running deployments.
			awaitingCancellations = true
			if !deployutil.IsDeploymentCancelled(&deployment) {
				deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
				deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledNewerDeploymentExists
				_, err := c.kubeClient.ReplicationControllers(deployment.Namespace).Update(&deployment)
				if err != nil {
					c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCancellationFailed", "Failed to cancel deployment %q superseded by version %d: %s", deployment.Name, config.Status.LatestVersion, err)
				} else {
					c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCancelled", "Cancelled deployment %q superseded by version %d", deployment.Name, config.Status.LatestVersion)
				}
			}
		}
	}
	// Wait for deployment cancellations before reconciling or creating a new
	// deployment to avoid competing with existing deployment processes.
	if awaitingCancellations {
		c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentAwaitingCancellation", "Deployment of version %d awaiting cancellation of older running deployments", config.Status.LatestVersion)
		// raise a transientError so that the deployment config can be re-queued
		return transientError(fmt.Sprintf("found previous inflight deployment for %s - requeuing", deployutil.LabelForDeploymentConfig(config)))
	}
	// If the latest deployment already exists, reconcile existing deployments
	// and return early.
	if latestIsDeployed {
		// If the latest deployment is still running, try again later. We don't
		// want to compete with the deployer.
		if !deployutil.IsTerminatedDeployment(latestDeployment) {
			return c.updateStatus(config)
		}
		return c.reconcileDeployments(existingDeployments, config)
	}
	// No deployments are running and the latest deployment doesn't exist, so
	// create the new deployment.
	deployment, err := deployutil.MakeDeployment(config, c.codec)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make deployment from (potentially invalid) deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err))
	}
	created, err := c.kubeClient.ReplicationControllers(config.Namespace).Create(deployment)
	if err != nil {
		// If the deployment was already created, just move on. The cache could be
		// stale, or another process could have already handled this update.
		if errors.IsAlreadyExists(err) {
			return c.updateStatus(config)
		}
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %s", config.Status.LatestVersion, err)
		return fmt.Errorf("couldn't create deployment for deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCreated", "Created new deployment %q for version %d", created.Name, config.Status.LatestVersion)

	return c.updateStatus(config)
}
Example #7
			g.By("verifying that both latestVersion and generation are updated")
			version, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.status.latestVersion}\"").Output()
			version = strings.Trim(version, "\"")
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("checking the latest version for %s: %s", resource, version))
			o.Expect(version).To(o.ContainSubstring("1"))
			generation, err := oc.Run("get").Args(resource, "--output=jsonpath=\"{.metadata.generation}\"").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("checking the generation for %s: %s", resource, generation))
			o.Expect(generation).To(o.ContainSubstring("1"))

			g.By("verifying the deployment is marked complete")
			err = wait.Poll(100*time.Millisecond, 1*time.Minute, func() (bool, error) {
				rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get(name + "-" + version)
				o.Expect(err).NotTo(o.HaveOccurred())
				return deployutil.IsTerminatedDeployment(rc), nil
			})
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("verifying that scaling updates the generation")
			_, err = oc.Run("scale").Args(resource, "--replicas=2").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			generation, err = oc.Run("get").Args(resource, "--output=jsonpath=\"{.metadata.generation}\"").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			g.By(fmt.Sprintf("checking the generation for %s: %s", resource, generation))
			o.Expect(generation).To(o.ContainSubstring("2"))

			g.By("deploying a second time [new client]")
			_, err = oc.Run("deploy").Args("--latest", name).Output()
			o.Expect(err).NotTo(o.HaveOccurred())
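The test wraps its readiness check in wait.Poll, which keeps invoking the condition on an interval until it returns true, returns an error, or the timeout elapses. A toy re-implementation of that contract for reference:

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll mimics the wait.Poll contract used by the test: stop on the first
// true result or non-nil error, otherwise fail once the timeout elapses.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the condition")
}

func main() {
	attempts := 0
	err := poll(10*time.Millisecond, time.Second, func() (bool, error) {
		attempts++
		return attempts == 3, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}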
Example #8
				pods, err := oc.KubeREST().Pods(oc.Namespace()).List(opts)
				if err != nil {
					return false, nil
				}

				ready = 0
				for i := range pods.Items {
					pod := pods.Items[i]
					if kapi.IsPodReady(&pod) {
						ready++
					}
				}

				return len(pods.Items) == ready, nil
			}); err != nil {
				o.Expect(fmt.Errorf("deployment config %q never became ready (ready: %d, desired: %d)",
					config.Name, ready, config.Spec.Replicas)).NotTo(o.HaveOccurred())
			}

			g.By("verifying that the deployment is still running")
			latestName := deployutil.DeploymentNameForConfigVersion(name, config.Status.LatestVersion)
			latest, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get(latestName)
			o.Expect(err).NotTo(o.HaveOccurred())
			if deployutil.IsTerminatedDeployment(latest) {
				o.Expect(fmt.Errorf("expected deployment %q not to have terminated", latest.Name)).NotTo(o.HaveOccurred())
			}
			o.Expect(waitForLatestCondition(oc, name, deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred())
		})
	})
})
Example #9
// Handle implements the loop that processes deployment configs. Since this controller started
// using caches, the provided config MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentConfigController) Handle(config *deployapi.DeploymentConfig) error {
	// There's nothing to reconcile until the version is nonzero.
	if config.Status.LatestVersion == 0 {
		return c.updateStatus(config, []kapi.ReplicationController{})
	}

	// Find all deployments owned by the deployment config.
	selector := deployutil.ConfigSelector(config.Name)
	existingDeployments, err := c.rcStore.ReplicationControllers(config.Namespace).List(selector)
	if err != nil {
		return err
	}

	// In case the deployment config has been marked for deletion, merely update its status with
	// the latest available information. Some deletions may take some time to complete so there
	// is value in doing this.
	if config.DeletionTimestamp != nil {
		return c.updateStatus(config, existingDeployments)
	}

	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	// If the latest deployment doesn't exist yet, cancel any running
	// deployments to allow them to be superseded by the new config version.
	awaitingCancellations := false
	if !latestIsDeployed {
		for i := range existingDeployments {
			deployment := existingDeployments[i]
			// Skip deployments with an outcome.
			if deployutil.IsTerminatedDeployment(&deployment) {
				continue
			}
			// Cancel running deployments.
			awaitingCancellations = true
			if !deployutil.IsDeploymentCancelled(&deployment) {

				// Retry faster on conflicts
				var updatedDeployment *kapi.ReplicationController
				if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
					rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
					if kapierrors.IsNotFound(err) {
						return nil
					}
					if err != nil {
						return err
					}
					copied, err := deployutil.DeploymentDeepCopy(rc)
					if err != nil {
						return err
					}
					copied.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
					copied.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledNewerDeploymentExists
					updatedDeployment, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
					return err
				}); err != nil {
					c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCancellationFailed", "Failed to cancel deployment %q superseded by version %d: %s", deployment.Name, config.Status.LatestVersion, err)
				} else {
					if updatedDeployment != nil {
						// replace the current deployment with the updated copy so that a future update has a chance at working
						existingDeployments[i] = *updatedDeployment
						c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCancelled", "Cancelled deployment %q superseded by version %d", deployment.Name, config.Status.LatestVersion)
					}
				}
			}
		}
	}
	// Wait for deployment cancellations before reconciling or creating a new
	// deployment to avoid competing with existing deployment processes.
	if awaitingCancellations {
		c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentAwaitingCancellation", "Deployment of version %d awaiting cancellation of older running deployments", config.Status.LatestVersion)
		return fmt.Errorf("found previous inflight deployment for %s - requeuing", deployutil.LabelForDeploymentConfig(config))
	}
	// If the latest deployment already exists, reconcile existing deployments
	// and return early.
	if latestIsDeployed {
		// If the latest deployment is still running, try again later. We don't
		// want to compete with the deployer.
		if !deployutil.IsTerminatedDeployment(latestDeployment) {
			return c.updateStatus(config, existingDeployments)
		}

		return c.reconcileDeployments(existingDeployments, config)
	}
	// If the config is paused we shouldn't create new deployments for it.
	if config.Spec.Paused {
		// in order for revision history limit cleanup to work for paused
		// deployments, we need to trigger it here
		if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
			c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
		}

		return c.updateStatus(config, existingDeployments)
	}
	// No deployments are running and the latest deployment doesn't exist, so
	// create the new deployment.
	deployment, err := deployutil.MakeDeployment(config, c.codec)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make deployment from (potentially invalid) deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err))
	}
	created, err := c.rn.ReplicationControllers(config.Namespace).Create(deployment)
	if err != nil {
		// If the deployment was already created, just move on. The cache could be
		// stale, or another process could have already handled this update.
		if kapierrors.IsAlreadyExists(err) {
			return c.updateStatus(config, existingDeployments)
		}
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %s", config.Status.LatestVersion, err)
		return fmt.Errorf("couldn't create deployment for deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCreated", "Created new deployment %q for version %d", created.Name, config.Status.LatestVersion)

	// As we've just created a new deployment, we need to make sure to clean
	// up old deployments if we have reached our deployment history quota
	existingDeployments = append(existingDeployments, *created)
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	return c.updateStatus(config, existingDeployments)
}
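The cancellation path in this version re-fetches the ReplicationController inside kclient.RetryOnConflict, so every retry works against the latest resource version. A self-contained sketch of that retry contract, with a hypothetical conflict predicate:

package main

import (
	"errors"
	"fmt"
)

// errConflict stands in for an HTTP 409 from the API server.
var errConflict = errors.New("conflict: the object has been modified")

// retryOnConflict keeps calling fn while it reports a conflict, up to the
// given number of attempts; any other result is returned immediately.
func retryOnConflict(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		// The re-fetch-and-update lives inside fn, so each retry sees
		// the newest version of the object.
		if err = fn(); !errors.Is(err, errConflict) {
			return err
		}
	}
	return err
}

func main() {
	calls := 0
	err := retryOnConflict(5, func() error {
		calls++
		if calls < 3 {
			return errConflict // the first two updates race with another writer
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}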
Example #10
// Handle processes deployment and either creates a deployer pod or responds
// to a terminal deployment status. Since this controller started using caches,
// the provided rc MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	// Copy all the annotations from the deployment.
	updatedAnnotations := make(map[string]string)
	for key, value := range deployment.Annotations {
		updatedAnnotations[key] = value
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
	deployer, deployerErr := c.getPod(deployment.Namespace, deployerPodName)
	if deployerErr == nil {
		nextStatus = c.nextStatus(deployer, deployment, updatedAnnotations)
	}

	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod.
		// Instead try to delete any deployer pods found and transition the
		// deployment to Pending so that the deployment config controller
		// continues to see the deployment as in-flight. Eventually the deletion
		// of the deployer pod should cause a requeue of this deployment and
		// then it can be transitioned to Failed by this controller.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusPending
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
			break
		}

		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're re-
		// entering. Ensure that the pod is the one we created by verifying the
		// annotation on it, and throw a retryable error.
		if deployerErr != nil && !kerrors.IsNotFound(deployerErr) {
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), deployerErr)
		}
		if deployerErr == nil && deployer != nil {
			// Do a stronger check to validate that the existing deployer pod is
			// actually for this deployment, and if not, fail this deployment.
			//
			// TODO: Investigate checking the container image of the running pod and
			// comparing with the intended deployer pod image. If we do so, we'll need
			// to ensure that changes to 'unrelated' pods don't result in updates to
			// the deployment. So, the image check will have to be done in other areas
			// of the code as well.
			if deployutil.DeploymentNameFor(deployer) != deployment.Name {
				nextStatus = deployapi.DeploymentStatusFailed
				updatedAnnotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "FailedCreate", fmt.Sprintf("Error creating deployer pod since another pod with the same name (%q) exists", deployer.Name))
				glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), deployer.Name)
			} else {
				// Update to pending or to the appropriate status relative to the existing validated deployer pod.
				updatedAnnotations[deployapi.DeploymentPodAnnotation] = deployer.Name
				nextStatus = nextStatusComp(nextStatus, deployapi.DeploymentStatusPending)
				glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", deployer.Name, deployutil.LabelForDeployment(deployment))
			}
			// Don't try and re-create the deployer pod.
			break
		}

		if _, ok := deployment.Annotations[deployapi.DeploymentIgnorePodAnnotation]; ok {
			return nil
		}

		// Generate a deployer pod spec.
		deployerPod, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		// Create the deployer pod.
		deploymentPod, err := c.pn.Pods(deployment.Namespace).Create(deployerPod)
		// Retry on error.
		if err != nil {
			return actionableError(fmt.Sprintf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		updatedAnnotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Created deployer pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))

	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		switch {
		case kerrors.IsNotFound(deployerErr):
			nextStatus = deployapi.DeploymentStatusFailed
			// If the deployment is cancelled here then we deleted the deployer in a previous
			// resync of the deployment.
			if !deployutil.IsDeploymentCancelled(deployment) {
				updatedAnnotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "Failed", fmt.Sprintf("Deployer pod %q has gone missing", deployerPodName))
				deployerErr = fmt.Errorf("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
				utilruntime.HandleError(deployerErr)
			}

		case deployerErr != nil:
			// We'll try again later on resync. Continue to process cancellations.
			deployerErr = fmt.Errorf("Error getting deployer pod %q for deployment %q: %v", deployerPodName, deployutil.LabelForDeployment(deployment), deployerErr)
			utilruntime.HandleError(deployerErr)

		default: /* err == nil */
			// If the deployment has been cancelled, delete any deployer pods
			// found and transition the deployment to Pending so that the
			// deployment config controller continues to see the deployment
			// as in-flight. Eventually the deletion of the deployer pod should
			// cause a requeue of this deployment and then it can be transitioned
			// to Failed by this controller.
			if deployutil.IsDeploymentCancelled(deployment) {
				if err := c.cleanupDeployerPods(deployment); err != nil {
					return err
				}
			}
		}

	case deployapi.DeploymentStatusFailed:
		// Try to cleanup once more a cancelled deployment in case hook pods
		// were created just after we issued the first cleanup request.
		if deployutil.IsDeploymentCancelled(deployment) {
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
		}

	case deployapi.DeploymentStatusComplete:
		if err := c.cleanupDeployerPods(deployment); err != nil {
			return err
		}
	}

	// Update only if we need to transition to a new phase.
	if deployutil.CanTransitionPhase(currentStatus, nextStatus) {
		deployment, err := deployutil.DeploymentDeepCopy(deployment)
		if err != nil {
			return err
		}

		updatedAnnotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		deployment.Annotations = updatedAnnotations

		// if we are going to transition to failed or complete and scale is non-zero, we'll check one more
		// time to see if we are a test deployment to guarantee that we maintain the test invariant.
		if deployment.Spec.Replicas != 0 && deployutil.IsTerminatedDeployment(deployment) {
			if config, err := deployutil.DecodeDeploymentConfig(deployment, c.codec); err == nil && config.Spec.Test {
				deployment.Spec.Replicas = 0
			}
		}

		if _, err := c.rn.ReplicationControllers(deployment.Namespace).Update(deployment); err != nil {
			return fmt.Errorf("couldn't update deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s (scale: %d)", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus, deployment.Spec.Replicas)
	}
	return nil
}
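The final status write is guarded by deployutil.CanTransitionPhase, so phases only ever move forward. A sketch of a forward-only ordering consistent with the New, Pending, Running, Complete/Failed progression seen in these examples (the ordering is inferred from usage, not copied from the source):

package main

import "fmt"

// phaseOrder ranks the phases; Complete and Failed share the top rank
// because both are terminal.
var phaseOrder = map[string]int{
	"New":      0,
	"Pending":  1,
	"Running":  2,
	"Complete": 3,
	"Failed":   3,
}

// canTransitionPhase allows only strictly forward moves, which also rules
// out rewriting one terminal phase as the other.
func canTransitionPhase(current, next string) bool {
	return phaseOrder[next] > phaseOrder[current]
}

func main() {
	fmt.Println(canTransitionPhase("Pending", "Running"))  // true
	fmt.Println(canTransitionPhase("Complete", "Running")) // false: terminal
}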
Example #11
// Handle syncs pod's status with any associated deployment.
func (c *DeployerPodController) Handle(pod *kapi.Pod) error {
	// Find the deployment associated with the deployer pod.
	deploymentName := deployutil.DeploymentNameFor(pod)
	if len(deploymentName) == 0 {
		return nil
	}
	// Reject updates to anything but the main deployer pod
	// TODO: Find a way to filter this on the watch side.
	if pod.Name != deployutil.DeployerPodNameForDeployment(deploymentName) {
		return nil
	}

	deployment, err := c.deploymentClient.getDeployment(pod.Namespace, deploymentName)
	// If the deployment for this pod has disappeared, we should clean up this
	// and any other deployer pods, then bail out.
	if err != nil {
		// Some retrieval error occurred. Retry.
		if !kerrors.IsNotFound(err) {
			return fmt.Errorf("couldn't get deployment %s/%s which owns deployer pod %s/%s", pod.Namespace, deploymentName, pod.Name, pod.Namespace)
		}
		// Find all the deployer pods for the deployment (including this one).
		deployers, err := c.deployerPodsFor(pod.Namespace, deploymentName)
		if err != nil {
			// Retry.
			return fmt.Errorf("couldn't get deployer pods for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// Delete all deployers.
		for _, deployer := range deployers.Items {
			err := c.deletePod(deployer.Namespace, deployer.Name)
			if err != nil {
				if !kerrors.IsNotFound(err) {
					// TODO: Should this fire an event?
					glog.V(2).Infof("Couldn't delete orphaned deployer pod %s/%s: %v", deployer.Namespace, deployer.Name, err)
				}
			} else {
				// TODO: Should this fire an event?
				glog.V(2).Infof("Deleted orphaned deployer pod %s/%s", deployer.Namespace, deployer.Name)
			}
		}
		return nil
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch pod.Status.Phase {
	case kapi.PodRunning:
		if !deployutil.IsTerminatedDeployment(deployment) {
			nextStatus = deployapi.DeploymentStatusRunning
		}
	case kapi.PodSucceeded:
		// Detect failure based on the container state
		nextStatus = deployapi.DeploymentStatusComplete
		for _, info := range pod.Status.ContainerStatuses {
			if info.State.Terminated != nil && info.State.Terminated.ExitCode != 0 {
				nextStatus = deployapi.DeploymentStatusFailed
				break
			}
		}
		// Sync the internal replica annotation with the target so that we can
		// distinguish deployer updates from other scaling events.
		deployment.Annotations[deployapi.DeploymentReplicasAnnotation] = deployment.Annotations[deployapi.DesiredReplicasAnnotation]
		if nextStatus == deployapi.DeploymentStatusComplete {
			delete(deployment.Annotations, deployapi.DesiredReplicasAnnotation)
		}
	case kapi.PodFailed:
		nextStatus = deployapi.DeploymentStatusFailed
	}

	if currentStatus != nextStatus {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
			if kerrors.IsNotFound(err) {
				return nil
			}
			return fmt.Errorf("couldn't update Deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated Deployment %s status from %s to %s", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus)
	}

	return nil
}
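The switch in the middle of this handler is the heart of the controller: it projects the deployer pod's phase onto the deployment's status. Condensed into a pure function for readability (the status strings stand in for the deployapi constants):

package main

import "fmt"

// nextStatusForPod condenses the phase mapping from the example: a running
// pod keeps the deployment Running (unless it already terminated), a failed
// pod fails it, and a succeeded pod completes it only if every container
// exited with code 0.
func nextStatusForPod(current, podPhase string, exitCodes []int) string {
	switch podPhase {
	case "Running":
		if current == "Complete" || current == "Failed" {
			return current // don't resurrect a terminated deployment
		}
		return "Running"
	case "Failed":
		return "Failed"
	case "Succeeded":
		for _, code := range exitCodes {
			if code != 0 {
				return "Failed" // detect failure from the container state
			}
		}
		return "Complete"
	}
	return current
}

func main() {
	fmt.Println(nextStatusForPod("Running", "Succeeded", []int{0, 1})) // Failed
	fmt.Println(nextStatusForPod("Running", "Succeeded", []int{0}))    // Complete
}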
Example #12
func (o CancelOptions) Run() error {
	allErrs := []error{}
	for _, info := range o.Infos {
		config, ok := info.Object.(*deployapi.DeploymentConfig)
		if !ok {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("cancelling", info.Source, fmt.Errorf("expected deployment configuration, got %T", info.Object)))
			// Skip this item: config is nil when the type assertion fails.
			continue
		}
		if config.Spec.Paused {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("cancelling", info.Source, fmt.Errorf("unable to cancel paused deployment %s/%s", config.Namespace, config.Name)))
			continue
		}

		mapping, err := o.Mapper.RESTMapping(kapi.Kind("ReplicationController"))
		if err != nil {
			return err
		}

		mutateFn := func(rc *kapi.ReplicationController) bool {
			if deployutil.IsDeploymentCancelled(rc) {
				kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "already cancelled")
				return false
			}

			patches := set.CalculatePatches([]*resource.Info{{Object: rc, Mapping: mapping}}, o.Encoder, func(*resource.Info) (bool, error) {
				rc.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
				rc.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser
				return true, nil
			})

			if len(patches) == 0 {
				kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "already cancelled")
				return false
			}

			_, err := o.Clientset.ReplicationControllers(rc.Namespace).Patch(rc.Name, kapi.StrategicMergePatchType, patches[0].Patch)
			if err != nil {
				allErrs = append(allErrs, kcmdutil.AddSourceToErr("cancelling", info.Source, err))
				return false
			}
			kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "cancelling")
			return true
		}

		deployments, cancelled, err := o.forEachControllerInConfig(config.Namespace, config.Name, mutateFn)
		if err != nil {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("cancelling", info.Source, err))
			continue
		}

		if !cancelled {
			latest := &deployments[0]
			maybeCancelling := ""
			if deployutil.IsDeploymentCancelled(latest) && !deployutil.IsTerminatedDeployment(latest) {
				maybeCancelling = " (cancelling)"
			}
			timeAt := strings.ToLower(units.HumanDuration(time.Since(latest.CreationTimestamp.Time)))
			fmt.Fprintf(o.Out, "No rollout is in progress (latest rollout #%d %s%s %s ago)\n",
				deployutil.DeploymentVersionFor(latest),
				strings.ToLower(string(deployutil.DeploymentStatusFor(latest))),
				maybeCancelling,
				timeAt)
		}

	}
	return utilerrors.NewAggregate(allErrs)
}
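Unlike Example #3, this last example cancels through a strategic merge patch computed by set.CalculatePatches, which touches only the fields being changed instead of rewriting the whole object. A hand-written approximation of the patch body it ends up sending (the annotation keys are the same assumed stand-ins sketched after Example #3):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Only metadata.annotations is present, so a strategic merge patch
	// leaves every other field of the ReplicationController untouched.
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				"openshift.io/deployment.cancelled":     "true",
				"openshift.io/deployment.status-reason": "cancelled by the user",
			},
		},
	}
	body, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // the JSON sent to the Patch call
}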