Example #1
func printDeploymentRc(deployment *kapi.ReplicationController, client deploymentDescriberClient, w io.Writer, header string, verbose bool) error {
	if len(header) > 0 {
		fmt.Fprintf(w, "%v:\n", header)
	}

	if verbose {
		fmt.Fprintf(w, "\tName:\t%s\n", deployment.Name)
	}
	timeAt := strings.ToLower(formatRelativeTime(deployment.CreationTimestamp.Time))
	fmt.Fprintf(w, "\tCreated:\t%s ago\n", timeAt)
	fmt.Fprintf(w, "\tStatus:\t%s\n", deployutil.DeploymentStatusFor(deployment))
	fmt.Fprintf(w, "\tReplicas:\t%d current / %d desired\n", deployment.Status.Replicas, deployment.Spec.Replicas)

	if verbose {
		fmt.Fprintf(w, "\tSelector:\t%s\n", formatLabels(deployment.Spec.Selector))
		fmt.Fprintf(w, "\tLabels:\t%s\n", formatLabels(deployment.Labels))
		running, waiting, succeeded, failed, err := getPodStatusForDeployment(deployment, client)
		if err != nil {
			return err
		}
		fmt.Fprintf(w, "\tPods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed)
	}

	return nil
}
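// The tab-delimited Fprintf calls above suggest w is meant to be a
// text/tabwriter.Writer; a minimal, hypothetical caller sketch (the tabwriter
// parameters and function name are assumptions, not part of the original code):
func printAlignedDeployment(deployment *kapi.ReplicationController, client deploymentDescriberClient) error {
	// Columns only line up once the tabwriter is flushed.
	tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
	if err := printDeploymentRc(deployment, client, tw, "Deployment", true); err != nil {
		return err
	}
	return tw.Flush()
}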
func describeDeployments(node *graph.DeploymentConfigNode, count int) []string {
	if node == nil {
		return nil
	}
	out := []string{}
	deployments := node.Deployments

	if node.ActiveDeployment == nil {
		on, auto := describeDeploymentConfigTriggers(node.DeploymentConfig)
		if node.DeploymentConfig.LatestVersion == 0 {
			out = append(out, fmt.Sprintf("#1 deployment waiting %s", on))
		} else if auto {
			out = append(out, fmt.Sprintf("#%d deployment pending %s", node.DeploymentConfig.LatestVersion, on))
		}
		// TODO: detect new image available?
	} else {
		deployments = append([]*kapi.ReplicationController{node.ActiveDeployment}, deployments...)
	}

	for i, deployment := range deployments {
		out = append(out, describeDeploymentStatus(deployment, i == 0))

		switch {
		case count == -1:
			// A count of -1 means no explicit limit: describe deployments until
			// the first complete one has been reported.
			if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete {
				return out
			}
		default:
			if i+1 >= count {
				return out
			}
		}
	}
	return out
}
// TestHandle_runningPod ensures that a running deployer pod results in a
// transition of the deployment's status to running.
func TestHandle_runningPod(t *testing.T) {
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusPending)
	var updatedDeployment *kapi.ReplicationController

	controller := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return deployment, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				return deployment, nil
			},
		},
	}

	err := controller.Handle(runningPod(deployment))

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}

	if e, a := deployapi.DeploymentStatusRunning, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
}
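// The runningPod helper used above is not part of this excerpt; a plausible
// sketch, assuming the deployer pod is linked to its deployment through the
// deployment-name annotation that deployutil.DeploymentNameFor reads:
func runningPod(deployment *kapi.ReplicationController) *kapi.Pod {
	return &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{
			Name:      deployutil.DeployerPodNameForDeployment(deployment.Name),
			Namespace: deployment.Namespace,
			Annotations: map[string]string{
				deployapi.DeploymentAnnotation: deployment.Name,
			},
		},
		Status: kapi.PodStatus{
			Phase: kapi.PodRunning,
		},
	}
}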
// TestHandle_deploymentCleanupTransientError ensures that a failure
// to clean up a failed deployment results in a transient error
// and the deployment status is not set to Failed.
func TestHandle_deploymentCleanupTransientError(t *testing.T) {
	completedDeployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codec)
	completedDeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)
	currentDeployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(2), kapi.Codec)
	currentDeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)
	currentDeployment.Annotations[deployapi.DesiredReplicasAnnotation] = "2"

	controller := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return currentDeployment, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				// simulate failure ONLY for the completed deployment
				if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusComplete {
					return nil, fmt.Errorf("test failure in updating completed deployment")
				}
				return deployment, nil
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return &kapi.ReplicationControllerList{Items: []kapi.ReplicationController{*currentDeployment, *completedDeployment}}, nil
			},
		},
	}

	err := controller.Handle(terminatedPod(currentDeployment))

	if err == nil {
		t.Fatalf("expected an error")
	}

	if _, isTransient := err.(transientError); !isTransient {
		t.Fatalf("expected transientError on failure to update deployment")
	}

	if e, a := deployapi.DeploymentStatusRunning, deployutil.DeploymentStatusFor(currentDeployment); e != a {
		t.Fatalf("expected updated deployment status to remain %s, got %s", e, a)
	}
}
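// terminatedPod and the transientError type asserted on above are also not
// shown in this excerpt; plausible sketches under the same annotation assumption:
type transientError string

func (e transientError) Error() string { return "transient deployment error: " + string(e) }

// terminatedPod returns a deployer pod in the Failed phase for deployment.
func terminatedPod(deployment *kapi.ReplicationController) *kapi.Pod {
	return &kapi.Pod{
		ObjectMeta: kapi.ObjectMeta{
			Name:      deployutil.DeployerPodNameForDeployment(deployment.Name),
			Namespace: deployment.Namespace,
			Annotations: map[string]string{
				deployapi.DeploymentAnnotation: deployment.Name,
			},
		},
		Status: kapi.PodStatus{
			Phase: kapi.PodFailed,
		},
	}
}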
Example #5
// retry resets the status of the latest deployment to New, which will cause
// the deployment to be retried. An error is returned if the deployment is not
// currently in a failed state.
func (c *retryDeploymentCommand) retry(config *deployapi.DeploymentConfig, out io.Writer) error {
	if config.LatestVersion == 0 {
		return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name)
	}
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.client.GetDeployment(config.Namespace, deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("Unable to find the latest deployment (#%d).\nYou can start a new deployment using the --latest option.", config.LatestVersion)
		}
		return err
	}

	if status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed {
		message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.LatestVersion, status)
		if status == deployapi.DeploymentStatusComplete {
			message += "You can start a new deployment using the --latest option."
		} else {
			message += "Optionally, you can cancel this deployment using the --cancel option."
		}

		return fmt.Errorf("%s", message)
	}

	// Delete the deployer pod as well as the deployment hooks pods, if any
	pods, err := c.client.ListDeployerPodsFor(config.Namespace, deploymentName)
	if err != nil {
		return fmt.Errorf("Failed to list deployer/hook pods for deployment #%d: %v", config.LatestVersion, err)
	}
	for _, pod := range pods.Items {
		err := c.client.DeletePod(&pod)
		if err != nil {
			return fmt.Errorf("Failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.LatestVersion, err)
		}
	}

	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	// clear out the cancellation flag as well as any previous status-reason annotation
	delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
	_, err = c.client.UpdateDeployment(deployment)
	if err == nil {
		fmt.Fprintf(out, "retried #%d\n", config.LatestVersion)
	}
	return err
}
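// retry relies on the convention that a deployment (a ReplicationController)
// is named after its config and version; a sketch of what
// deployutil.LatestDeploymentNameForConfig presumably does (the
// "<name>-<version>" format is an assumption here):
func LatestDeploymentNameForConfig(config *deployapi.DeploymentConfig) string {
	return fmt.Sprintf("%s-%d", config.Name, config.LatestVersion)
}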
Example #6
// cancel cancels any deployment process in progress for config.
func (c *cancelDeploymentCommand) cancel(config *deployapi.DeploymentConfig, out io.Writer) error {
	deployments, err := c.client.ListDeploymentsForConfig(config.Namespace, config.Name)
	if err != nil {
		return err
	}
	if len(deployments.Items) == 0 {
		fmt.Fprintln(out, "no deployments found to cancel")
		return nil
	}
	failedCancellations := []string{}
	for _, deployment := range deployments.Items {
		status := deployutil.DeploymentStatusFor(&deployment)

		switch status {
		case deployapi.DeploymentStatusNew,
			deployapi.DeploymentStatusPending,
			deployapi.DeploymentStatusRunning:

			if deployutil.IsDeploymentCancelled(&deployment) {
				continue
			}

			deployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
			deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser
			_, err := c.client.UpdateDeployment(&deployment)
			if err == nil {
				fmt.Fprintf(out, "cancelled #%d\n", config.LatestVersion)
			} else {
				fmt.Fprintf(out, "couldn't cancel deployment %d (status: %s): %v\n", deployutil.DeploymentVersionFor(&deployment), status, err)
				failedCancellations = append(failedCancellations, strconv.Itoa(deployutil.DeploymentVersionFor(&deployment)))
			}
		default:
			fmt.Fprintln(out, "no active deployments to cancel")
		}
	}
	if len(failedCancellations) == 0 {
		return nil
	} else {
		return fmt.Errorf("couldn't cancel deployment %s", strings.Join(failedCancellations, ", "))
	}
}
func describeDeploymentStatus(deploy *kapi.ReplicationController, first bool) string {
	timeAt := strings.ToLower(formatRelativeTime(deploy.CreationTimestamp.Time))
	status := deployutil.DeploymentStatusFor(deploy)
	version := deployutil.DeploymentVersionFor(deploy)
	switch status {
	case deployapi.DeploymentStatusFailed:
		reason := deployutil.DeploymentStatusReasonFor(deploy)
		if len(reason) > 0 {
			reason = fmt.Sprintf(": %s", reason)
		}
		// TODO: encode fail time in the rc
		return fmt.Sprintf("#%d deployment failed %s ago%s%s", version, timeAt, reason, describeDeploymentPodSummaryInline(deploy, false))
	case deployapi.DeploymentStatusComplete:
		// TODO: pod status output
		return fmt.Sprintf("#%d deployed %s ago%s", version, timeAt, describeDeploymentPodSummaryInline(deploy, first))
	case deployapi.DeploymentStatusRunning:
		return fmt.Sprintf("#%d deployment running for %s%s", version, timeAt, describeDeploymentPodSummaryInline(deploy, false))
	default:
		return fmt.Sprintf("#%d deployment %s %s ago%s", version, strings.ToLower(string(status)), timeAt, describeDeploymentPodSummaryInline(deploy, false))
	}
}
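// describeDeploymentStatus leans on deployutil accessors that simply read
// annotations off the ReplicationController; minimal sketches (the annotation
// keys and exact signatures are assumptions):
func DeploymentStatusFor(deployment *kapi.ReplicationController) deployapi.DeploymentStatus {
	return deployapi.DeploymentStatus(deployment.Annotations[deployapi.DeploymentStatusAnnotation])
}

// DeploymentVersionFor parses the version annotation, returning 0 when it is
// missing or malformed.
func DeploymentVersionFor(deployment *kapi.ReplicationController) int {
	version, err := strconv.Atoi(deployment.Annotations[deployapi.DeploymentVersionAnnotation])
	if err != nil {
		return 0
	}
	return version
}

func DeploymentStatusReasonFor(deployment *kapi.ReplicationController) string {
	return deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation]
}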
Example #8
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (c *deployLatestCommand) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.client.GetDeployment(config.Namespace, deploymentName)
	if err != nil {
		if !kerrors.IsNotFound(err) {
			return err
		}
	} else {
		// Reject attempts to start a concurrent deployment.
		status := deployutil.DeploymentStatusFor(deployment)
		if status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {
			return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.LatestVersion, status)
		}
	}

	config.LatestVersion++
	_, err = c.client.UpdateDeploymentConfig(config)
	if err == nil {
		fmt.Fprintf(out, "deployed #%d\n", config.LatestVersion)
	}
	return err
}
// TestHandle_podTerminatedFailNoContainerStatus ensures that a failed
// deployer pod with no container status results in a transition of the
// deployment's status to failed.
func TestHandle_podTerminatedFailNoContainerStatus(t *testing.T) {
	var updatedDeployment *kapi.ReplicationController
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codec)
	// Since we do not set the desired replicas annotation, this also tests
	// that the error is just logged and does not result in a failure.
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusRunning)

	controller := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return deployment, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				updatedDeployment = deployment
				return deployment, nil
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return &kapi.ReplicationControllerList{Items: []kapi.ReplicationController{*deployment}}, nil
			},
		},
	}

	err := controller.Handle(terminatedPod(deployment))

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}

	if e, a := deployapi.DeploymentStatusFailed, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
}
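// The tests in these examples stub the controller's client with
// deploymentClientImpl, a function-field test double; a plausible shape,
// assuming it satisfies the controller's deploymentClient interface:
type deploymentClientImpl struct {
	getDeploymentFunc            func(namespace, name string) (*kapi.ReplicationController, error)
	updateDeploymentFunc         func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error)
	listDeploymentsForConfigFunc func(namespace, configName string) (*kapi.ReplicationControllerList, error)
}

// Each method forwards to its injectable function field, so a test can fake
// exactly the calls it cares about.
func (i *deploymentClientImpl) getDeployment(namespace, name string) (*kapi.ReplicationController, error) {
	return i.getDeploymentFunc(namespace, name)
}

func (i *deploymentClientImpl) updateDeployment(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
	return i.updateDeploymentFunc(namespace, deployment)
}

func (i *deploymentClientImpl) listDeploymentsForConfig(namespace, configName string) (*kapi.ReplicationControllerList, error) {
	return i.listDeploymentsForConfigFunc(namespace, configName)
}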
Example #10
// Handle processes deployment and either creates a deployer pod or responds
// to a terminal deployment status.
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod, and
		// transition to failed immediately.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusFailed
			break
		}

		// Generate a deployer pod spec.
		podTemplate, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}

		// Create the deployer pod.
		deploymentPod, err := c.podClient.createPod(deployment.Namespace, podTemplate)
		if err == nil {
			deployment.Annotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
			nextStatus = deployapi.DeploymentStatusPending
			glog.V(4).Infof("Created pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))
			break
		}

		// Retry on error.
		if !kerrors.IsAlreadyExists(err) {
			c.recorder.Eventf(deployment, "failedCreate", "Error creating deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
			return fmt.Errorf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}

		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're re-
		// entering. Ensure that the pod is the one we created by verifying the
		// annotation on it, and throw a retryable error.
		existingPod, err := c.podClient.getPod(deployment.Namespace, deployutil.DeployerPodNameForDeployment(deployment.Name))
		if err != nil {
			c.recorder.Eventf(deployment, "failedCreate", "Error getting existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}

		// Do a stronger check to validate that the existing deployer pod is
		// actually for this deployment, and if not, fail this deployment.
		//
		// TODO: Investigate checking the container image of the running pod and
		// comparing with the intended deployer pod image. If we do so, we'll need
		// to ensure that changes to 'unrelated' pods don't result in updates to
		// the deployment. So, the image check will have to be done in other areas
		// of the code as well.
		if deployutil.DeploymentNameFor(existingPod) != deployment.Name {
			nextStatus = deployapi.DeploymentStatusFailed
			deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
			c.recorder.Eventf(deployment, "failedCreate", "Error creating deployer pod for %s since another pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
			glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
			break
		}

		// Update to pending relative to the existing validated deployer pod.
		deployment.Annotations[deployapi.DeploymentPodAnnotation] = existingPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", existingPod.Name, deployutil.LabelForDeployment(deployment))
	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		// If the deployer pod has vanished, consider the deployment a failure.
		deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
		if _, err := c.podClient.getPod(deployment.Namespace, deployerPodName); err != nil {
			if kerrors.IsNotFound(err) {
				nextStatus = deployapi.DeploymentStatusFailed
				deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
				deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.recorder.Eventf(deployment, "failed", "Deployer pod %q has gone missing", deployerPodName)
				glog.V(4).Infof("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
				break
			} else {
				// We'll try again later on resync. Continue to process cancellations.
				glog.V(2).Infof("Error getting deployer pod %s for deployment %s: %#v", deployerPodName, deployutil.LabelForDeployment(deployment), err)
			}
		}

		// If the deployment is cancelled, terminate any deployer/hook pods.
		// NOTE: Do not mark the deployment as Failed just yet. The deployer pod
		// controller will mark it Failed once it picks up the deployer pod's
		// failure state; it will also scale down the failed deployment and
		// scale the last successfully completed deployment back up.
		if deployutil.IsDeploymentCancelled(deployment) {
			deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
			if err != nil {
				return fmt.Errorf("couldn't fetch deployer pods for %s while trying to cancel deployment: %v", deployutil.LabelForDeployment(deployment), err)
			}
			glog.V(4).Infof("Cancelling %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
			zeroDelay := int64(1)
			for _, deployerPod := range deployerPods {
				// Set ActiveDeadlineSeconds on the pod so it's terminated very
				// soon; 1 is the minimum allowed value.
				if deployerPod.Spec.ActiveDeadlineSeconds == nil || *deployerPod.Spec.ActiveDeadlineSeconds != zeroDelay {
					deployerPod.Spec.ActiveDeadlineSeconds = &zeroDelay
					if _, err := c.podClient.updatePod(deployerPod.Namespace, &deployerPod); err != nil {
						c.recorder.Eventf(deployment, "failedCancellation", "Error cancelling deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
						return fmt.Errorf("couldn't cancel deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
					}
					}
					glog.V(4).Infof("Cancelled deployer pod %s for deployment %s", deployerPod.Name, deployutil.LabelForDeployment(deployment))
				}
			}
			c.recorder.Eventf(deployment, "cancelled", "Cancelled deployment")
		}
	case deployapi.DeploymentStatusFailed:
		// Nothing to do in this terminal state.
		glog.V(4).Infof("Ignoring deployment %s (status %s)", deployutil.LabelForDeployment(deployment), currentStatus)
	case deployapi.DeploymentStatusComplete:
		// List any deployer pods in the namespace labeled for this deployment.
		deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
		if err != nil {
			return fmt.Errorf("couldn't fetch deployer pods for %s after successful completion: %v", deployutil.LabelForDeployment(deployment), err)
		}
		glog.V(4).Infof("Deleting %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
		cleanedAll := true
		for _, deployerPod := range deployerPods {
			if err := c.podClient.deletePod(deployerPod.Namespace, deployerPod.Name); err != nil {
				if !kerrors.IsNotFound(err) {
					// If the pod deletion failed, log the error and continue; we
					// will try to delete any remaining deployer pods and return
					// an error later.
					kutil.HandleError(fmt.Errorf("couldn't delete completed deployer pod %s/%s for deployment %s: %v", deployment.Namespace, deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
					cleanedAll = false
				}
				// A not-found error means the pod is already gone.
			} else {
				glog.V(4).Infof("Deleted completed deployer pod %s/%s for deployment %s", deployment.Namespace, deployerPod.Name, deployutil.LabelForDeployment(deployment))
			}
		}

		if !cleanedAll {
			return fmt.Errorf("couldn't clean up all deployer pods for %s", deployutil.LabelForDeployment(deployment))
		}
	}

	if currentStatus != nextStatus {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
			c.recorder.Eventf(deployment, "failedUpdate", "Error updating deployment %s status to %s", deployutil.LabelForDeployment(deployment), nextStatus)
			return fmt.Errorf("couldn't update deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus)
	}
	return nil
}
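// Handle separates retryable failures from ones that must not be requeued via
// the fatalError type returned above; a minimal sketch mirroring transientError:
type fatalError string

func (e fatalError) Error() string { return "fatal error handling deployment: " + string(e) }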
Example #11
// Deploy starts the deployment process for deploymentName.
func (d *Deployer) Deploy(namespace, deploymentName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, deploymentName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s/%s: %v", namespace, deploymentName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, latest.Codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s/%s: %v", to.Namespace, to.Name, err)
	}

	// Get a strategy for the deployment.
	strategy, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has no desired replica count", deployutil.LabelForDeployment(to))
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.DeploymentsByLatestVersionDesc(deployments))

	// Find any last completed deployment.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.DeploymentStatusFor(&candidate) == deployapi.DeploymentStatusComplete {
			from = &candidate
			break
		}
	}

	// Scale down any deployments which aren't the new or last deployment.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			glog.Errorf("Couldn't scale down prior deployment %s: %v", deployutil.LabelForDeployment(&candidate), err)
		} else {
			glog.Infof("Scaled down prior deployment %s", deployutil.LabelForDeployment(&candidate))
		}
	}

	// Perform the deployment.
	if from == nil {
		glog.Infof("Deploying %s for the first time (replicas: %d)", deployutil.LabelForDeployment(to), desiredReplicas)
	} else {
		glog.Infof("Deploying from %s to %s (replicas: %d)", deployutil.LabelForDeployment(from), deployutil.LabelForDeployment(to), desiredReplicas)
	}
	return strategy.Deploy(from, to, desiredReplicas)
}
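// Deploy sorts with deployutil.DeploymentsByLatestVersionDesc; a sketch of
// that sort.Interface, assuming it orders by the version annotation, newest first:
type DeploymentsByLatestVersionDesc []kapi.ReplicationController

func (d DeploymentsByLatestVersionDesc) Len() int      { return len(d) }
func (d DeploymentsByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d DeploymentsByLatestVersionDesc) Less(i, j int) bool {
	return DeploymentVersionFor(&d[j]) < DeploymentVersionFor(&d[i])
}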
Example #12
// Handle syncs pod's status with any associated deployment.
func (c *DeployerPodController) Handle(pod *kapi.Pod) error {
	// Find the deployment associated with the deployer pod.
	deploymentName := deployutil.DeploymentNameFor(pod)
	if len(deploymentName) == 0 {
		return nil
	}
	// Reject updates to anything but the main deployer pod
	// TODO: Find a way to filter this on the watch side.
	if pod.Name != deployutil.DeployerPodNameForDeployment(deploymentName) {
		return nil
	}

	deployment, err := c.deploymentClient.getDeployment(pod.Namespace, deploymentName)
	// If the deployment for this pod has disappeared, we should clean up this
	// and any other deployer pods, then bail out.
	if err != nil {
		// Some retrieval error occurred. Retry.
		if !kerrors.IsNotFound(err) {
			return fmt.Errorf("couldn't get deployment %s/%s which owns deployer pod %s/%s: %v", pod.Namespace, deploymentName, pod.Namespace, pod.Name, err)
		}
		}
		// Find all the deployer pods for the deployment (including this one).
		deployers, err := c.deployerPodsFor(pod.Namespace, deploymentName)
		if err != nil {
			// Retry. Note that deployment is nil here (the get returned
			// NotFound), so identify the deployment by namespace and name.
			return fmt.Errorf("couldn't get deployer pods for %s/%s: %v", pod.Namespace, deploymentName, err)
		}
		// Delete all deployers.
		for _, deployer := range deployers.Items {
			err := c.deletePod(deployer.Namespace, deployer.Name)
			if err != nil {
				if !kerrors.IsNotFound(err) {
					// TODO: Should this fire an event?
					glog.V(2).Infof("Couldn't delete orphaned deployer pod %s/%s: %v", deployer.Namespace, deployer.Name, err)
				}
			} else {
				// TODO: Should this fire an event?
				glog.V(2).Infof("Deleted orphaned deployer pod %s/%s", deployer.Namespace, deployer.Name)
			}
		}
		return nil
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch pod.Status.Phase {
	case kapi.PodRunning:
		nextStatus = deployapi.DeploymentStatusRunning
	case kapi.PodSucceeded:
		// Detect failure based on the container state
		nextStatus = deployapi.DeploymentStatusComplete
		for _, info := range pod.Status.ContainerStatuses {
			if info.State.Termination != nil && info.State.Termination.ExitCode != 0 {
				nextStatus = deployapi.DeploymentStatusFailed
			}
		}
	case kapi.PodFailed:
		// If the deployment is already marked Failed, do not attempt clean up again.
		if currentStatus != deployapi.DeploymentStatusFailed {
			// Clean up also updates the deployment status to Failed. If clean up
			// fails, the resulting transient error triggers retries and the
			// deployment is not marked Failed; this prevents new deployments
			// from being created for this config in the meantime.
			err := c.cleanupFailedDeployment(deployment)
			if err != nil {
				return transientError(fmt.Sprintf("couldn't clean up failed deployment: %v", err))
			}
		}
	}

	if currentStatus != nextStatus {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
			if kerrors.IsNotFound(err) {
				return nil
			}
			return fmt.Errorf("couldn't update Deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated Deployment %s status from %s to %s", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus)
	}

	return nil
}
Example #13
func (c *DeployerPodController) cleanupFailedDeployment(deployment *kapi.ReplicationController) error {
	// Scale down the current failed deployment
	configName := deployutil.DeploymentConfigNameFor(deployment)
	existingDeployments, err := c.deploymentClient.listDeploymentsForConfig(deployment.Namespace, configName)
	if err != nil {
		return fmt.Errorf("couldn't list Deployments for DeploymentConfig %s: %v", configName, err)
	}

	desiredReplicas, ok := deployutil.DeploymentDesiredReplicas(deployment)
	if !ok {
		// If the desired replica count cannot be determined, log the error and
		// still update the failed deployment below; this cannot be treated as
		// a transient error.
		kutil.HandleError(fmt.Errorf("could not determine desired replicas from %s to reset replicas for last completed deployment", deployutil.LabelForDeployment(deployment)))
	}

	if ok && len(existingDeployments.Items) > 0 {
		sort.Sort(deployutil.DeploymentsByLatestVersionDesc(existingDeployments.Items))
		for index, existing := range existingDeployments.Items {
			// If a deployment newer than the failed one exists, only the failed
			// deployment's replicas are set to 0 (after this loop); there is no
			// point in scaling the last completed deployment back up, since the
			// newer deployment will scale it down anyway.
			if index == 0 && existing.Name != deployment.Name {
				break
			}

			// the latest completed deployment is the one that needs to be scaled back up
			if deployutil.DeploymentStatusFor(&existing) == deployapi.DeploymentStatusComplete {
				if existing.Spec.Replicas == desiredReplicas {
					break
				}

				// scale back the completed deployment to the target of the failed deployment
				existing.Spec.Replicas = desiredReplicas
				if _, err := c.deploymentClient.updateDeployment(existing.Namespace, &existing); err != nil {
					if kerrors.IsNotFound(err) {
						return nil
					}
					return fmt.Errorf("couldn't update replicas to %d for deployment %s: %v", desiredReplicas, deployutil.LabelForDeployment(&existing), err)
				}
				glog.V(4).Infof("Updated replicas to %d for deployment %s", desiredReplicas, deployutil.LabelForDeployment(&existing))

				break
			}
		}
	}
	// set the replicas for the failed deployment to 0
	// and set the status to Failed
	deployment.Spec.Replicas = 0
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusFailed)
	if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
		if kerrors.IsNotFound(err) {
			return nil
		}
		return fmt.Errorf("couldn't scale down the deployment %s and mark it as failed: %v", deployutil.LabelForDeployment(deployment), err)
	}
	glog.V(4).Infof("Scaled down the deployment %s and marked it as failed", deployutil.LabelForDeployment(deployment))

	return nil
}
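// cleanupFailedDeployment depends on deployutil.DeploymentDesiredReplicas,
// which reads the desired-replicas annotation stamped onto a deployment at
// creation time; a sketch:
func DeploymentDesiredReplicas(deployment *kapi.ReplicationController) (int, bool) {
	desired, err := strconv.Atoi(deployment.Annotations[deployapi.DesiredReplicasAnnotation])
	if err != nil {
		return 0, false
	}
	return desired, true
}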
Example #14
// Handle processes config and creates a new deployment if necessary.
func (c *DeploymentConfigController) Handle(config *deployapi.DeploymentConfig) error {
	// Only deploy when the version has advanced past 0.
	if config.LatestVersion == 0 {
		glog.V(5).Infof("Waiting for first version of %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	// Check if any existing inflight deployments (any non-terminal state).
	existingDeployments, err := c.deploymentClient.listDeploymentsForConfig(config.Namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't list Deployments for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	var inflightDeployment *kapi.ReplicationController
	latestDeploymentExists := false
	for _, deployment := range existingDeployments.Items {
		deployment := deployment // copy: &deployment is retained across iterations below and must not alias the loop variable
		// Check whether this is the latest deployment. We only return after the
		// multiple-active-deployments case has been dealt with.
		if deployutil.DeploymentVersionFor(&deployment) == config.LatestVersion {
			latestDeploymentExists = true
		}

		deploymentStatus := deployutil.DeploymentStatusFor(&deployment)
		switch deploymentStatus {
		case deployapi.DeploymentStatusFailed,
			deployapi.DeploymentStatusComplete:
			// A previous deployment in a terminal state can be ignored. Specific
			// terminal states are matched here so that any newly introduced
			// deployment state is not silently ignored.
		default:
			if inflightDeployment == nil {
				inflightDeployment = &deployment
				continue
			}
			var deploymentForCancellation *kapi.ReplicationController
			if deployutil.DeploymentVersionFor(inflightDeployment) < deployutil.DeploymentVersionFor(&deployment) {
				deploymentForCancellation, inflightDeployment = inflightDeployment, &deployment
			} else {
				deploymentForCancellation = &deployment
			}

			deploymentForCancellation.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
			deploymentForCancellation.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledNewerDeploymentExists
			if _, err := c.deploymentClient.updateDeployment(deploymentForCancellation.Namespace, deploymentForCancellation); err != nil {
				util.HandleError(fmt.Errorf("couldn't cancel Deployment %s: %v", deployutil.LabelForDeployment(deploymentForCancellation), err))
			}
			glog.V(4).Infof("Cancelled Deployment %s for DeploymentConfig %s", deployutil.LabelForDeployment(deploymentForCancellation), deployutil.LabelForDeploymentConfig(config))
		}
	}

	// if the latest deployment exists then nothing else needs to be done
	if latestDeploymentExists {
		return nil
	}

	// check to see if there are inflight deployments
	if inflightDeployment != nil {
		// raise a transientError so that the deployment config can be re-queued
		glog.V(4).Infof("Found previous inflight Deployment for %s - will requeue", deployutil.LabelForDeploymentConfig(config))
		return transientError(fmt.Sprintf("found previous inflight Deployment for %s - requeuing", deployutil.LabelForDeploymentConfig(config)))
	}

	// Try and build a deployment for the config.
	deployment, err := c.makeDeployment(config)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make Deployment from (potentially invalid) DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err))
	}

	// Compute the desired replicas for the deployment. Use the last completed
	// deployment's current replica count, or the config template if there is no
	// prior completed deployment available.
	desiredReplicas := config.Template.ControllerTemplate.Replicas
	if len(existingDeployments.Items) > 0 {
		sort.Sort(deployutil.DeploymentsByLatestVersionDesc(existingDeployments.Items))
		for _, existing := range existingDeployments.Items {
			if deployutil.DeploymentStatusFor(&existing) == deployapi.DeploymentStatusComplete {
				desiredReplicas = existing.Spec.Replicas
				glog.V(4).Infof("Desired replicas for %s set to %d based on prior completed deployment %s", deployutil.LabelForDeploymentConfig(config), desiredReplicas, existing.Name)
				break
			}
		}
	}
	deployment.Annotations[deployapi.DesiredReplicasAnnotation] = strconv.Itoa(desiredReplicas)

	// Create the deployment.
	if _, err := c.deploymentClient.createDeployment(config.Namespace, deployment); err == nil {
		glog.V(4).Infof("Created Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	} else {
		// If the deployment was already created, just move on. The cache could be stale, or another
		// process could have already handled this update.
		if errors.IsAlreadyExists(err) {
			glog.V(4).Infof("Deployment already exists for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
			return nil
		}

		// log an event if the deployment could not be created that the user can discover
		c.recorder.Eventf(config, "failedCreate", "Error creating: %v", err)
		return fmt.Errorf("couldn't create Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
}
Example #15
// TestCmdDeploy_retryOk ensures that a failed deployment can be retried.
func TestCmdDeploy_retryOk(t *testing.T) {
	deletedPods := []string{}
	config := deploytest.OkDeploymentConfig(1)

	existingDeployment := deploymentFor(config, deployapi.DeploymentStatusFailed)
	existingDeployment.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
	existingDeployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledByUser

	existingDeployerPods := []kapi.Pod{
		{ObjectMeta: kapi.ObjectMeta{Name: "prehook"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "posthook"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "deployerpod"}},
	}

	var updatedDeployment *kapi.ReplicationController
	commandClient := &deployCommandClientImpl{
		GetDeploymentFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return existingDeployment, nil
		},
		UpdateDeploymentConfigFn: func(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
			t.Fatalf("unexpected call to UpdateDeploymentConfig")
			return nil, nil
		},
		UpdateDeploymentFn: func(deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			updatedDeployment = deployment
			return deployment, nil
		},
		ListDeployerPodsForFn: func(namespace, name string) (*kapi.PodList, error) {
			return &kapi.PodList{Items: existingDeployerPods}, nil
		},
		DeletePodFn: func(pod *kapi.Pod) error {
			deletedPods = append(deletedPods, pod.Name)
			return nil
		},
	}

	c := &retryDeploymentCommand{client: commandClient}
	err := c.retry(config, ioutil.Discard)

	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if updatedDeployment == nil {
		t.Fatalf("expected updated config")
	}

	if deployutil.IsDeploymentCancelled(updatedDeployment) {
		t.Fatalf("deployment should not have the cancelled flag set anymore")
	}

	if deployutil.DeploymentStatusReasonFor(updatedDeployment) != "" {
		t.Fatalf("deployment status reason should be empty")
	}

	sort.Strings(deletedPods)
	if !reflect.DeepEqual(deletedPods, []string{"deployerpod", "posthook", "prehook"}) {
		t.Fatalf("Not all deployer pods for the failed deployment were deleted")
	}

	if e, a := deployapi.DeploymentStatusNew, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected deployment status %s, got %s", e, a)
	}
}
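// TestCmdDeploy_retryOk builds its fixture with a deploymentFor helper that is
// not part of this excerpt; a plausible sketch in terms of MakeDeployment:
func deploymentFor(config *deployapi.DeploymentConfig, status deployapi.DeploymentStatus) *kapi.ReplicationController {
	deployment, _ := deployutil.MakeDeployment(config, kapi.Codec)
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(status)
	return deployment
}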
// TestHandle_cleanupDeploymentFailure ensures that clean up happens
// for the deployment if the deployer pod fails.
//  - failed deployment is scaled down
//  - the last completed deployment is scaled back up
func TestHandle_cleanupDeploymentFailure(t *testing.T) {
	var existingDeployments *kapi.ReplicationControllerList
	var failedDeployment *kapi.ReplicationController
	// map of deployment version to the updated deployment
	var updatedDeployments map[int]*kapi.ReplicationController

	controller := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return failedDeployment, nil
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				if _, found := updatedDeployments[deployutil.DeploymentVersionFor(deployment)]; found {
					t.Fatalf("unexpected multiple updates for deployment #%d", deployutil.DeploymentVersionFor(deployment))
				}
				updatedDeployments[deployutil.DeploymentVersionFor(deployment)] = deployment
				return deployment, nil
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return existingDeployments, nil
			},
		},
	}

	type existing struct {
		version         int
		status          deployapi.DeploymentStatus
		initialReplicas int
		updatedReplicas int
	}

	type scenario struct {
		name string
		// this is the deployment that is passed to Handle
		version int
		// this is the target replicas for the deployment that failed
		desiredReplicas int
		// existing deployments also include the one being handled currently
		existing []existing
	}

	// Existing deployments are intentionally listed out of order to verify sorting.
	scenarios := []scenario{
		{"No previous deployments",
			1, 3, []existing{
				{1, deployapi.DeploymentStatusRunning, 3, 0},
			}},
		{"Multiple existing deployments - none in complete state",
			3, 2, []existing{
				{1, deployapi.DeploymentStatusFailed, 2, 2},
				{2, deployapi.DeploymentStatusFailed, 0, 0},
				{3, deployapi.DeploymentStatusRunning, 2, 0},
			}},
		{"Failed deployment is already at 0 replicas",
			3, 2, []existing{
				{1, deployapi.DeploymentStatusFailed, 2, 2},
				{2, deployapi.DeploymentStatusFailed, 0, 0},
				{3, deployapi.DeploymentStatusRunning, 0, 0},
			}},
		{"Multiple existing completed deployments",
			4, 2, []existing{
				{3, deployapi.DeploymentStatusComplete, 0, 2},
				{2, deployapi.DeploymentStatusComplete, 0, 0},
				{4, deployapi.DeploymentStatusRunning, 1, 0},
				{1, deployapi.DeploymentStatusFailed, 0, 0},
			}},
		// A deployment already exists after the current failed deployment
		// only the current deployment is marked as failed
		// the completed deployment is not scaled up
		{"Deployment exists after current failed",
			4, 2, []existing{
				{3, deployapi.DeploymentStatusComplete, 1, 1},
				{2, deployapi.DeploymentStatusComplete, 0, 0},
				{4, deployapi.DeploymentStatusRunning, 2, 0},
				{5, deployapi.DeploymentStatusNew, 0, 0},
				{1, deployapi.DeploymentStatusFailed, 0, 0},
			}},
	}

	for _, scenario := range scenarios {
		t.Logf("running scenario: %s", scenario.name)
		updatedDeployments = make(map[int]*kapi.ReplicationController)
		failedDeployment = nil
		existingDeployments = &kapi.ReplicationControllerList{}

		for _, e := range scenario.existing {
			d, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(e.version), kapi.Codec)
			d.Annotations[deployapi.DeploymentStatusAnnotation] = string(e.status)
			d.Spec.Replicas = e.initialReplicas
			// if this is the deployment passed to Handle, set the desired replica annotation
			if e.version == scenario.version {
				d.Annotations[deployapi.DesiredReplicasAnnotation] = strconv.Itoa(scenario.desiredReplicas)
				failedDeployment = d
			}
			existingDeployments.Items = append(existingDeployments.Items, *d)
		}
		associatedDeployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(scenario.version), kapi.Codec)
		err := controller.Handle(terminatedPod(associatedDeployment))

		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}

		// only the failed and the last completed deployment should be updated
		if len(updatedDeployments) > 2 {
			t.Fatalf("expected to update only the failed and last completed deployment")
		}

		for _, existing := range scenario.existing {
			updatedDeployment, ok := updatedDeployments[existing.version]
			if existing.initialReplicas != existing.updatedReplicas {
				if !ok {
					t.Fatalf("expected deployment #%d to be updated", existing.version)
				}
				if e, a := existing.updatedReplicas, updatedDeployment.Spec.Replicas; e != a {
					t.Fatalf("expected deployment #%d to be scaled to %d, got %d", existing.version, e, a)
				}
			} else if ok && existing.version != scenario.version {
				t.Fatalf("unexpected update for deployment #%d; replicas %d; status: %s", existing.version, updatedDeployment.Spec.Replicas, deployutil.DeploymentStatusFor(updatedDeployment))
			}
		}
		if deployutil.DeploymentStatusFor(updatedDeployments[scenario.version]) != deployapi.DeploymentStatusFailed {
			t.Fatalf("status for deployment #%d expected to be updated to failed; got %s", scenario.version, deployutil.DeploymentStatusFor(updatedDeployments[scenario.version]))
		}
		if updatedDeployments[scenario.version].Spec.Replicas != 0 {
			t.Fatalf("deployment #%d expected to be scaled down to 0; got %d", scenario.version, updatedDeployments[scenario.version].Spec.Replicas)
		}
	}
}