Example #1
// executeExecNewPod executes an ExecNewPod hook by creating a new pod based on
// the hook parameters and deployment. The pod is then watched synchronously
// until it completes; if the pod fails, an error is returned.
//
// The hook pod inherits the following from the container the hook refers to:
//
//   * Environment (hook keys take precedence)
//   * Working directory
//   * Resources
func (e *HookExecutor) executeExecNewPod(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error {
	// Build a pod spec from the hook config and deployment
	podSpec, err := makeHookPod(hook, deployment, label)
	if err != nil {
		return err
	}

	// Try to create the pod.
	pod, err := e.PodClient.CreatePod(deployment.Namespace, podSpec)
	if err != nil {
		if !kerrors.IsAlreadyExists(err) {
			return fmt.Errorf("couldn't create lifecycle pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
	} else {
		glog.V(0).Infof("Created lifecycle pod %s for deployment %s", pod.Name, deployutil.LabelForDeployment(deployment))
	}

	stopChannel := make(chan struct{})
	defer close(stopChannel)
	// If the pod already existed, the create call returned no pod object; fall
	// back to watching the pod by its expected name.
	podName, resourceVersion := podSpec.Name, ""
	if pod != nil {
		podName, resourceVersion = pod.Name, pod.ResourceVersion
	}
	nextPod := e.PodClient.PodWatch(deployment.Namespace, podName, resourceVersion, stopChannel)

	glog.V(0).Infof("Waiting for hook pod %s/%s to complete", deployment.Namespace, podName)
	for {
		pod := nextPod()
		switch pod.Status.Phase {
		case kapi.PodSucceeded:
			return nil
		case kapi.PodFailed:
			return fmt.Errorf("%s", pod.Status.Message)
		}
	}
}
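The PodWatch call above returns a blocking accessor: each call to nextPod() yields the next observed state of the pod. A minimal sketch of how such an accessor could be adapted from a channel of pod updates follows; the name nextPodAccessor and the channel-based shape are illustrative assumptions, not the actual OpenShift implementation.

// nextPodAccessor is a hypothetical sketch that adapts a channel of pod
// updates into the blocking nextPod() style used by executeExecNewPod. It
// assumes the producer keeps the channel open until stopChannel is closed.
func nextPodAccessor(podUpdates <-chan *kapi.Pod) func() *kapi.Pod {
	return func() *kapi.Pod {
		// Block until the watch delivers the next state of the pod.
		return <-podUpdates
	}
}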
Example #2
// Handle processes a deployment and either creates a deployer pod or responds
// to a terminal deployment status.
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod, and
		// transition to failed immediately.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusFailed
			break
		}

		// Generate a deployer pod spec.
		podTemplate, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}

		// Create the deployer pod.
		deploymentPod, err := c.podClient.createPod(deployment.Namespace, podTemplate)
		if err == nil {
			deployment.Annotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
			nextStatus = deployapi.DeploymentStatusPending
			glog.V(4).Infof("Created pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))
			break
		}

		// Retry on error.
		if !kerrors.IsAlreadyExists(err) {
			c.recorder.Eventf(deployment, "failedCreate", "Error creating deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
			return fmt.Errorf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}

		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're re-
		// entering. Ensure that the pod is the one we created by verifying the
		// annotation on it, and throw a retryable error.
		existingPod, err := c.podClient.getPod(deployment.Namespace, deployutil.DeployerPodNameForDeployment(deployment.Name))
		if err != nil {
			c.recorder.Eventf(deployment, "failedCreate", "Error getting existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}

		// Do a stronger check to validate that the existing deployer pod is
		// actually for this deployment, and if not, fail this deployment.
		//
		// TODO: Investigate checking the container image of the running pod and
		// comparing with the intended deployer pod image. If we do so, we'll need
		// to ensure that changes to 'unrelated' pods don't result in updates to
		// the deployment. So, the image check will have to be done in other areas
		// of the code as well.
		if deployutil.DeploymentNameFor(existingPod) != deployment.Name {
			nextStatus = deployapi.DeploymentStatusFailed
			deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
			c.recorder.Eventf(deployment, "failedCreate", "Error creating deployer pod for %s since another pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
			glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
			break
		}

		// Update to pending relative to the existing validated deployer pod.
		deployment.Annotations[deployapi.DeploymentPodAnnotation] = existingPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", existingPod.Name, deployutil.LabelForDeployment(deployment))
	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		// If the deployer pod has vanished, consider the deployment a failure.
		deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
		if _, err := c.podClient.getPod(deployment.Namespace, deployerPodName); err != nil {
			if kerrors.IsNotFound(err) {
				nextStatus = deployapi.DeploymentStatusFailed
				deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
				deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.recorder.Eventf(deployment, "failed", "Deployer pod %q has gone missing", deployerPodName)
				glog.V(4).Infof("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
				break
			} else {
				// We'll try again later on resync. Continue to process cancellations.
				glog.V(2).Infof("Error getting deployer pod %s for deployment %s: %#v", deployerPodName, deployutil.LabelForDeployment(deployment), err)
			}
		}

		// If the deployment is cancelled, terminate any deployer/hook pods.
		// NOTE: Do not mark the deployment as Failed just yet. The deployer pod
		// controller will mark it Failed when it picks up the deployer pod's
		// failure state; it will also scale down the failed deployment and
		// scale the last successfully completed deployment back up.
		if deployutil.IsDeploymentCancelled(deployment) {
			deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
			if err != nil {
				return fmt.Errorf("couldn't fetch deployer pods for %s while trying to cancel deployment: %v", deployutil.LabelForDeployment(deployment), err)
			}
			glog.V(4).Infof("Cancelling %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
			// ActiveDeadlineSeconds must be at least 1, so one second is the
			// soonest the deployer pods can be forced to terminate.
			minDeadline := int64(1)
			for _, deployerPod := range deployerPods {
				// Set ActiveDeadlineSeconds on the pod so it's terminated very soon.
				if deployerPod.Spec.ActiveDeadlineSeconds == nil || *deployerPod.Spec.ActiveDeadlineSeconds != minDeadline {
					deployerPod.Spec.ActiveDeadlineSeconds = &minDeadline
					if _, err := c.podClient.updatePod(deployerPod.Namespace, &deployerPod); err != nil {
						c.recorder.Eventf(deployment, "failedCancellation", "Error cancelling deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
						return fmt.Errorf("couldn't cancel deployer pod %s for deployment %s: %v", deployutil.LabelForDeployment(deployment), err)
					}
					glog.V(4).Infof("Cancelled deployer pod %s for deployment %s", deployerPod.Name, deployutil.LabelForDeployment(deployment))
				}
			}
			c.recorder.Eventf(deployment, "cancelled", "Cancelled deployment")
		}
	case deployapi.DeploymentStatusFailed:
		// Nothing to do in this terminal state.
		glog.V(4).Infof("Ignoring deployment %s (status %s)", deployutil.LabelForDeployment(deployment), currentStatus)
	case deployapi.DeploymentStatusComplete:
		// List any deployer pods in the namespace that carry this deployment's label.
		deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
		if err != nil {
			return fmt.Errorf("couldn't fetch deployer pods for %s after successful completion: %v", deployutil.LabelForDeployment(deployment), err)
		}
		glog.V(4).Infof("Deleting %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
		cleanedAll := true
		for _, deployerPod := range deployerPods {
			if err := c.podClient.deletePod(deployerPod.Namespace, deployerPod.Name); err != nil {
				if !kerrors.IsNotFound(err) {
					// If the pod deletion failed, log the error and continue; we
					// will try to delete any remaining deployer pods and return
					// an error at the end.
					kutil.HandleError(fmt.Errorf("couldn't delete completed deployer pod %s/%s for deployment %s: %v", deployment.Namespace, deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
					cleanedAll = false
				}
				// A NotFound error means the pod was already deleted; ignore it.
			} else {
				glog.V(4).Infof("Deleted completed deployer pod %s/%s for deployment %s", deployment.Namespace, deployerPod.Name, deployutil.LabelForDeployment(deployment))
			}
		}

		if !cleanedAll {
			return fmt.Errorf("couldn't clean up all deployer pods for %s", deployutil.LabelForDeployment(deployment))
		}
	}

	if currentStatus != nextStatus {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
			c.recorder.Eventf(deployment, "failedUpdate", "Error updating deployment %s status to %s", deployutil.LabelForDeployment(deployment), nextStatus)
			return fmt.Errorf("couldn't update deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus)
	}
	return nil
}
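Handle wraps some failures in fatalError so the caller's retry queue can distinguish non-retryable errors; later examples also use transientError for the opposite case. A minimal sketch, assuming both are simple string-based error types (the actual definitions live elsewhere in the controller package):

// fatalError is a sketch of a non-retryable error: the work item should be
// dropped rather than re-queued.
type fatalError string

func (e fatalError) Error() string { return "fatal error handling deployment: " + string(e) }

// transientError is a sketch of a retryable error: the work item should be
// re-queued and handled again later.
type transientError string

func (e transientError) Error() string { return "transient error handling deployment: " + string(e) }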
Example #3
// Deploy starts the deployment process for deploymentName.
func (d *Deployer) Deploy(namespace, deploymentName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, deploymentName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s/%s: %v", namespace, deploymentName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, latest.Codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s/%s: %v", to.Namespace, to.Name, err)
	}

	// Get a strategy for the deployment.
	strategy, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has no desired replica count", deployutil.LabelForDeployment(to))
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.DeploymentsByLatestVersionDesc(deployments))

	// Find any last completed deployment.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.DeploymentStatusFor(&candidate) == deployapi.DeploymentStatusComplete {
			from = &candidate
			break
		}
	}

	// Scale down any deployments which aren't the new or last deployment.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			glog.Errorf("Couldn't scale down prior deployment %s: %v", deployutil.LabelForDeployment(&candidate), err)
		} else {
			glog.Infof("Scaled down prior deployment %s", deployutil.LabelForDeployment(&candidate))
		}
	}

	// Perform the deployment.
	if from == nil {
		glog.Infof("Deploying %s for the first time (replicas: %d)", deployutil.LabelForDeployment(to), desiredReplicas)
	} else {
		glog.Infof("Deploying from %s to %s (replicas: %d)", deployutil.LabelForDeployment(from), deployutil.LabelForDeployment(to), desiredReplicas)
	}
	return strategy.Deploy(from, to, desiredReplicas)
}
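Deploy sorts the existing deployments with deployutil.DeploymentsByLatestVersionDesc before picking the last completed one. A hedged sketch of such a sort.Interface, assuming each deployment's version is recoverable via a DeploymentVersionFor helper like the one used in Example #7:

// DeploymentsByLatestVersionDesc is a sketch of a sort.Interface that orders
// replication controllers by deployment version, newest first.
type DeploymentsByLatestVersionDesc []kapi.ReplicationController

func (d DeploymentsByLatestVersionDesc) Len() int      { return len(d) }
func (d DeploymentsByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d DeploymentsByLatestVersionDesc) Less(i, j int) bool {
	// A higher version sorts earlier (descending order).
	return deployutil.DeploymentVersionFor(&d[j]) < deployutil.DeploymentVersionFor(&d[i])
}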
Example #4
func (s *RollingDeploymentStrategy) Deploy(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s: %v", deployutil.LabelForDeployment(to), err)
	}

	params := config.Template.Strategy.RollingParams
	updateAcceptor := s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds) * time.Second)

	// If there's no prior deployment, delegate to another strategy since the
	// rolling updater only supports transitioning between two deployments.
	//
	// Hook support is duplicated here for now. When the rolling updater can
	// handle initial deployments, all of this code can go away.
	if from == nil {
		// Execute any pre-hook.
		if params.Pre != nil {
			err := s.hookExecutor.Execute(params.Pre, to, "prehook")
			if err != nil {
				return fmt.Errorf("Pre hook failed: %s", err)
			}
			glog.Infof("Pre hook finished")
		}

		// Execute the delegate strategy.
		err := s.initialStrategy.DeployWithAcceptor(from, to, desiredReplicas, updateAcceptor)
		if err != nil {
			return err
		}

		// Execute any post-hook. Errors are logged and ignored.
		if params.Post != nil {
			err := s.hookExecutor.Execute(params.Post, to, "posthook")
			if err != nil {
				util.HandleError(fmt.Errorf("post hook failed: %s", err))
			} else {
				glog.Infof("Post hook finished")
			}
		}

		// All done.
		return nil
	}

	// Prepare for a rolling update.
	// Execute any pre-hook.
	if params.Pre != nil {
		err := s.hookExecutor.Execute(params.Pre, to, "prehook")
		if err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
		glog.Infof("Pre hook finished")
	}

	// HACK: Assign the source ID annotation that the rolling updater expects,
	// unless it already exists on the deployment.
	//
	// Related upstream issue:
	// https://github.com/GoogleCloudPlatform/kubernetes/pull/7183
	updatedTo, err := s.client.GetReplicationController(to.Namespace, to.Name)
	if err != nil {
		return fmt.Errorf("couldn't look up deployment %s: %v", deployutil.LabelForDeployment(to), err)
	}
	to = updatedTo
	if _, hasSourceId := to.Annotations[sourceIdAnnotation]; !hasSourceId {
		to.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", from.Name, from.ObjectMeta.UID)
		if updated, err := s.client.UpdateReplicationController(to.Namespace, to); err != nil {
			return fmt.Errorf("couldn't assign source annotation to deployment %s: %v", deployutil.LabelForDeployment(to), err)
		} else {
			to = updated
		}
	}

	// HACK: The rolling updater validates that an existing RC has >0 replicas,
	// but it then disregards that value, because the desired count is obtained
	// from the annotation on the RC. For now, fake the validation out by
	// setting replicas to 1.
	//
	// Related upstream issue:
	// https://github.com/GoogleCloudPlatform/kubernetes/pull/7183
	to.Spec.Replicas = 1

	// Perform a rolling update.
	rollingConfig := &kubectl.RollingUpdaterConfig{
		Out:            &rollingUpdaterWriter{},
		OldRc:          from,
		NewRc:          to,
		UpdatePeriod:   time.Duration(*params.UpdatePeriodSeconds) * time.Second,
		Interval:       time.Duration(*params.IntervalSeconds) * time.Second,
		Timeout:        time.Duration(*params.TimeoutSeconds) * time.Second,
		CleanupPolicy:  kubectl.PreserveRollingUpdateCleanupPolicy,
		UpdateAcceptor: updateAcceptor,
	}
	glog.Infof("Starting rolling update from %s to %s (desired replicas: %d, UpdatePeriodSeconds=%d, IntervalSeconds=%d, TimeoutSeconds=%d)",
		deployutil.LabelForDeployment(from),
		deployutil.LabelForDeployment(to),
		desiredReplicas,
		*params.UpdatePeriodSeconds,
		*params.IntervalSeconds,
		*params.TimeoutSeconds,
	)
	if err := s.rollingUpdate(rollingConfig); err != nil {
		return err
	}

	// Execute any post-hook. Errors are logged and ignored.
	if params.Post != nil {
		err := s.hookExecutor.Execute(params.Post, to, "posthook")
		if err != nil {
			util.HandleError(fmt.Errorf("Post hook failed: %s", err))
		} else {
			glog.Info("Post hook finished")
		}
	}

	return nil
}
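The RollingUpdaterConfig above routes the updater's progress output through a rollingUpdaterWriter. A plausible sketch, assuming it is a small io.Writer adapter that forwards each write to glog (the strings import is assumed; the real implementation may buffer or split lines differently):

// rollingUpdaterWriter is a sketch of an io.Writer that forwards the rolling
// updater's progress messages to glog.
type rollingUpdaterWriter struct{}

func (w *rollingUpdaterWriter) Write(p []byte) (n int, err error) {
	glog.Info(strings.TrimSpace(string(p)))
	return len(p), nil
}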
Example #5
// Handle syncs the pod's status with any associated deployment.
func (c *DeployerPodController) Handle(pod *kapi.Pod) error {
	// Find the deployment associated with the deployer pod.
	deploymentName := deployutil.DeploymentNameFor(pod)
	if len(deploymentName) == 0 {
		return nil
	}
	// Reject updates to anything but the main deployer pod
	// TODO: Find a way to filter this on the watch side.
	if pod.Name != deployutil.DeployerPodNameForDeployment(deploymentName) {
		return nil
	}

	deployment, err := c.deploymentClient.getDeployment(pod.Namespace, deploymentName)
	// If the deployment for this pod has disappeared, we should clean up this
	// and any other deployer pods, then bail out.
	if err != nil {
		// Some other retrieval error occurred. Retry.
		if !kerrors.IsNotFound(err) {
			return fmt.Errorf("couldn't get deployment %s/%s which owns deployer pod %s/%s: %v", pod.Namespace, deploymentName, pod.Namespace, pod.Name, err)
		}
		}
		// Find all the deployer pods for the deployment (including this one).
		deployers, err := c.deployerPodsFor(pod.Namespace, deploymentName)
		if err != nil {
			// Retry.
			return fmt.Errorf("couldn't get deployer pods for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// Delete all deployers.
		for _, deployer := range deployers.Items {
			err := c.deletePod(deployer.Namespace, deployer.Name)
			if err != nil {
				if !kerrors.IsNotFound(err) {
					// TODO: Should this fire an event?
					glog.V(2).Infof("Couldn't delete orphaned deployer pod %s/%s: %v", deployer.Namespace, deployer.Name, err)
				}
			} else {
				// TODO: Should this fire an event?
				glog.V(2).Infof("Deleted orphaned deployer pod %s/%s", deployer.Namespace, deployer.Name)
			}
		}
		return nil
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch pod.Status.Phase {
	case kapi.PodRunning:
		nextStatus = deployapi.DeploymentStatusRunning
	case kapi.PodSucceeded:
		// Detect failure based on the container state
		nextStatus = deployapi.DeploymentStatusComplete
		for _, info := range pod.Status.ContainerStatuses {
			if info.State.Termination != nil && info.State.Termination.ExitCode != 0 {
				nextStatus = deployapi.DeploymentStatusFailed
			}
		}
	case kapi.PodFailed:
		// If the deployment is already marked Failed, do not attempt cleanup again.
		if currentStatus != deployapi.DeploymentStatusFailed {
			// Cleanup also updates the deployment status to Failed. If cleanup
			// fails, this handler is retried and the deployment is not marked
			// Failed until cleanup succeeds.
			// Note: this prevents new deployments from being created for this
			// config in the meantime.
			err := c.cleanupFailedDeployment(deployment)
			if err != nil {
				return transientError(fmt.Sprintf("couldn't clean up failed deployment: %v", err))
			}
		}
	}

	if currentStatus != nextStatus {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
			if kerrors.IsNotFound(err) {
				return nil
			}
			return fmt.Errorf("couldn't update Deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated Deployment %s status from %s to %s", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus)
	}

	return nil
}
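The controllers in these examples read and write the deployment's status through annotations on the replication controller. A minimal sketch of how a helper like deployutil.DeploymentStatusFor could derive the status, assuming it simply reads the same status annotation the handlers write:

// DeploymentStatusFor is a sketch: it derives the deployment status from the
// status annotation that the controllers above maintain.
func DeploymentStatusFor(deployment *kapi.ReplicationController) deployapi.DeploymentStatus {
	return deployapi.DeploymentStatus(deployment.Annotations[deployapi.DeploymentStatusAnnotation])
}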
Example #6
func (c *DeployerPodController) cleanupFailedDeployment(deployment *kapi.ReplicationController) error {
	// Scale down the current failed deployment
	configName := deployutil.DeploymentConfigNameFor(deployment)
	existingDeployments, err := c.deploymentClient.listDeploymentsForConfig(deployment.Namespace, configName)
	if err != nil {
		return fmt.Errorf("couldn't list Deployments for DeploymentConfig %s: %v", configName, err)
	}

	desiredReplicas, ok := deployutil.DeploymentDesiredReplicas(deployment)
	if !ok {
		// If the desired replica count can't be determined, log the error and
		// still update the failed deployment; this can't be treated as a
		// transient error.
		kutil.HandleError(fmt.Errorf("couldn't determine desired replicas from %s to reset replicas for the last completed deployment", deployutil.LabelForDeployment(deployment)))
	}

	if ok && len(existingDeployments.Items) > 0 {
		sort.Sort(deployutil.DeploymentsByLatestVersionDesc(existingDeployments.Items))
		for index, existing := range existingDeployments.Items {
			// If a newer deployment exists, only the failed deployment needs its
			// replicas set to 0; there is no point in scaling the last completed
			// deployment back up, since the newer deployment will scale it down
			// anyway.
			if index == 0 && existing.Name != deployment.Name {
				break
			}

			// the latest completed deployment is the one that needs to be scaled back up
			if deployutil.DeploymentStatusFor(&existing) == deployapi.DeploymentStatusComplete {
				if existing.Spec.Replicas == desiredReplicas {
					break
				}

				// scale back the completed deployment to the target of the failed deployment
				existing.Spec.Replicas = desiredReplicas
				if _, err := c.deploymentClient.updateDeployment(existing.Namespace, &existing); err != nil {
					if kerrors.IsNotFound(err) {
						return nil
					}
					return fmt.Errorf("couldn't update replicas to %d for deployment %s: %v", desiredReplicas, deployutil.LabelForDeployment(&existing), err)
				}
				glog.V(4).Infof("Updated replicas to %d for deployment %s", desiredReplicas, deployutil.LabelForDeployment(&existing))

				break
			}
		}
	}
	// set the replicas for the failed deployment to 0
	// and set the status to Failed
	deployment.Spec.Replicas = 0
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusFailed)
	if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
		if kerrors.IsNotFound(err) {
			return nil
		}
		return fmt.Errorf("couldn't scale down the deployment %s and mark it as failed: %v", deployutil.LabelForDeployment(deployment), err)
	}
	glog.V(4).Infof("Scaled down the deployment %s and marked it as failed", deployutil.LabelForDeployment(deployment))

	return nil
}
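cleanupFailedDeployment depends on deployutil.DeploymentDesiredReplicas to recover the target replica count. A hedged sketch, assuming the count is stored as a stringified integer in the desired-replicas annotation, which matches Example #7 writing strconv.Itoa(desiredReplicas) into deployapi.DesiredReplicasAnnotation (strconv import assumed):

// DeploymentDesiredReplicas is a sketch: it parses the desired replica count
// from the annotation written when the deployment was created. The bool
// result reports whether a valid count was found.
func DeploymentDesiredReplicas(deployment *kapi.ReplicationController) (int, bool) {
	value, ok := deployment.Annotations[deployapi.DesiredReplicasAnnotation]
	if !ok {
		return 0, false
	}
	replicas, err := strconv.Atoi(value)
	if err != nil {
		return 0, false
	}
	return replicas, true
}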
Example #7
// Handle processes config and creates a new deployment if necessary.
func (c *DeploymentConfigController) Handle(config *deployapi.DeploymentConfig) error {
	// Only deploy when the version has advanced past 0.
	if config.LatestVersion == 0 {
		glog.V(5).Infof("Waiting for first version of %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	// Check for any existing in-flight deployments (in any non-terminal state).
	existingDeployments, err := c.deploymentClient.listDeploymentsForConfig(config.Namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't list Deployments for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	var inflightDeployment *kapi.ReplicationController
	latestDeploymentExists := false
	for i := range existingDeployments.Items {
		// Take a per-iteration copy so that pointers retained across
		// iterations (e.g. inflightDeployment) stay valid.
		deployment := existingDeployments.Items[i]
		// Check whether this is the latest deployment. We only return after
		// the multiple-active-deployments case has been dealt with.
		if deployutil.DeploymentVersionFor(&deployment) == config.LatestVersion {
			latestDeploymentExists = true
		}

		deploymentStatus := deployutil.DeploymentStatusFor(&deployment)
		switch deploymentStatus {
		case deployapi.DeploymentStatusFailed,
			deployapi.DeploymentStatusComplete:
			// A previous deployment in a terminal state can be ignored. We match
			// the terminal states explicitly so that any newly introduced
			// deployment state will not be silently ignored.
		default:
			if inflightDeployment == nil {
				inflightDeployment = &deployment
				continue
			}
			var deploymentForCancellation *kapi.ReplicationController
			if deployutil.DeploymentVersionFor(inflightDeployment) < deployutil.DeploymentVersionFor(&deployment) {
				deploymentForCancellation, inflightDeployment = inflightDeployment, &deployment
			} else {
				deploymentForCancellation = &deployment
			}

			deploymentForCancellation.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
			deploymentForCancellation.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledNewerDeploymentExists
			if _, err := c.deploymentClient.updateDeployment(deploymentForCancellation.Namespace, deploymentForCancellation); err != nil {
				util.HandleError(fmt.Errorf("couldn't cancel Deployment %s: %v", deployutil.LabelForDeployment(deploymentForCancellation), err))
			}
			glog.V(4).Infof("Cancelled Deployment %s for DeploymentConfig %s", deployutil.LabelForDeployment(deploymentForCancellation), deployutil.LabelForDeploymentConfig(config))
		}
	}

	// If the latest deployment already exists, nothing else needs to be done.
	if latestDeploymentExists {
		return nil
	}

	// If a previous deployment is still in flight, raise a transientError so
	// that the deployment config can be re-queued.
	if inflightDeployment != nil {
		glog.V(4).Infof("Found previous inflight Deployment for %s - will requeue", deployutil.LabelForDeploymentConfig(config))
		return transientError(fmt.Sprintf("found previous inflight Deployment for %s - requeuing", deployutil.LabelForDeploymentConfig(config)))
	}

	// Try to build a deployment for the config.
	deployment, err := c.makeDeployment(config)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make Deployment from (potentially invalid) DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err))
	}

	// Compute the desired replicas for the deployment. Use the last completed
	// deployment's current replica count, or the config template if there is no
	// prior completed deployment available.
	desiredReplicas := config.Template.ControllerTemplate.Replicas
	if len(existingDeployments.Items) > 0 {
		sort.Sort(deployutil.DeploymentsByLatestVersionDesc(existingDeployments.Items))
		for _, existing := range existingDeployments.Items {
			if deployutil.DeploymentStatusFor(&existing) == deployapi.DeploymentStatusComplete {
				desiredReplicas = existing.Spec.Replicas
				glog.V(4).Infof("Desired replicas for %s set to %d based on prior completed deployment %s", deployutil.LabelForDeploymentConfig(config), desiredReplicas, existing.Name)
				break
			}
		}
	}
	deployment.Annotations[deployapi.DesiredReplicasAnnotation] = strconv.Itoa(desiredReplicas)

	// Create the deployment.
	if _, err := c.deploymentClient.createDeployment(config.Namespace, deployment); err != nil {
		// If the deployment was already created, just move on. The cache could be
		// stale, or another process could have already handled this update.
		if errors.IsAlreadyExists(err) {
			glog.V(4).Infof("Deployment already exists for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
			return nil
		}

		// Log an event the user can discover explaining why the deployment
		// could not be created.
		c.recorder.Eventf(config, "failedCreate", "Error creating: %v", err)
		return fmt.Errorf("couldn't create Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	glog.V(4).Infof("Created Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
	return nil
}
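Cancellation in Handle is expressed purely through annotations: the controller stamps deployapi.DeploymentCancelledAnnotation, and other controllers test it with deployutil.IsDeploymentCancelled (see Examples #2 and #3). A minimal sketch of that check under the same assumption:

// IsDeploymentCancelled is a sketch: a deployment counts as cancelled when
// the cancellation annotation carries the expected value.
func IsDeploymentCancelled(deployment *kapi.ReplicationController) bool {
	value, ok := deployment.Annotations[deployapi.DeploymentCancelledAnnotation]
	return ok && value == deployapi.DeploymentCancelledAnnotationValue
}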
Example #8
// DeployWithAcceptor scales down the from deployment and then scales up the
// to deployment. If updateAcceptor is provided and the desired replica count
// is >1, the first replica of to is rolled out and validated before the full
// scale-up is performed.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor kubectl.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Template.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	// Execute any pre-hook.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, "prehook"); err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
		glog.Infof("Pre hook finished")
	}

	// Scale down the from deployment.
	if from != nil {
		glog.Infof("Scaling %s down to zero", deployutil.LabelForDeployment(from))
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", deployutil.LabelForDeployment(from), err)
		}
	}

	// If an UpdateAcceptor is provided and we're trying to scale up to more
	// than one replica, scale up to 1 and validate the replica, aborting if the
	// replica isn't acceptable.
	if updateAcceptor != nil && desiredReplicas > 1 {
		glog.Infof("Scaling %s to 1 before validating first replica", deployutil.LabelForDeployment(to))
		updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 1: %v", deployutil.LabelForDeployment(to), err)
		}
		glog.Infof("Validating first replica of %s", deployutil.LabelForDeployment(to))
		if err := updateAcceptor.Accept(updatedTo); err != nil {
			return fmt.Errorf("first replica rejected for %s: %v", to.Name, err)
		}
		to = updatedTo
	}

	// Complete the scale up.
	glog.Infof("Scaling %s to %d", deployutil.LabelForDeployment(to), desiredReplicas)
	updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
	if err != nil {
		return fmt.Errorf("couldn't scale %s to %d: %v", deployutil.LabelForDeployment(to), desiredReplicas, err)
	}
	to = updatedTo

	// Execute any post-hook. Errors are logged and ignored.
	if params != nil && params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, "posthook"); err != nil {
			util.HandleError(fmt.Errorf("post hook failed: %s", err))
		} else {
			glog.Infof("Post hook finished")
		}
	}

	glog.Infof("Deployment %s successfully made active", to.Name)
	return nil
}
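scaleAndWait is used for every scaling step in this strategy. A hedged sketch, assuming it delegates to a kubectl scaler with the given retry and wait parameters (mirroring the Scale call in Example #3) and then re-reads the controller; the scaler and getReplicationController fields are assumptions about the strategy's dependencies:

// scaleAndWait is a sketch: scale the deployment, wait for the scale to take
// effect, and return the controller's latest state.
func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationController, replicas int, retry, wait *kubectl.RetryParams) (*kapi.ReplicationController, error) {
	// Scale with no preconditions, retrying on conflicts and then waiting for
	// the new replica count to be reached.
	if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, wait); err != nil {
		return nil, err
	}
	// Re-read the controller to observe the updated state.
	return s.getReplicationController(deployment.Namespace, deployment.Name)
}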
Example #9
// Handle processes change triggers for config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	hasChangeTrigger := false
	for _, trigger := range config.Triggers {
		if trigger.Type == deployapi.DeploymentTriggerOnConfigChange {
			hasChangeTrigger = true
			break
		}
	}

	if !hasChangeTrigger {
		glog.V(4).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	if config.LatestVersion == 0 {
		_, _, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			c.recorder.Eventf(config, "failedCreate", "Couldn't create initial deployment: %v", err)
			return fmt.Errorf("couldn't create initial Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
		}
		glog.V(4).Infof("Created initial Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(2).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}

	newSpec, oldSpec := config.Template.ControllerTemplate.Template.Spec, deployedConfig.Template.ControllerTemplate.Template.Spec
	if kapi.Semantic.DeepEqual(oldSpec, newSpec) {
		glog.V(2).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}

	fromVersion, toVersion, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	return nil
}
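decodeConfig above recovers the DeploymentConfig that produced an existing deployment. A minimal sketch, assuming it reuses the same annotation-encoded config and codec seen in Example #3's DecodeDeploymentConfig call:

// decodeConfig is a sketch: the deployed config travels with the deployment
// as an encoded annotation and is decoded with the shared helper.
func (c *DeploymentConfigChangeController) decodeConfig(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
	return deployutil.DecodeDeploymentConfig(deployment, latest.Codec)
}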
Example #10
func (c *FirstContainerReady) Accept(deployment *kapi.ReplicationController) error {
	// For now, only validate the first replica.
	if deployment.Spec.Replicas != 1 {
		glog.Infof("automatically accepting deployment %s with %d replicas", deployutil.LabelForDeployment(deployment), deployment.Spec.Replicas)
		return nil
	}

	// Try and find the pod for the deployment.
	pods, err := c.podsForDeployment(deployment)
	if err != nil {
		return fmt.Errorf("couldn't get pods for deployment %s: %v", deployutil.LabelForDeployment(deployment), err)
	}
	if len(pods.Items) == 0 {
		return fmt.Errorf("no pods found for deployment %s", deployutil.LabelForDeployment(deployment))
	}

	// If we found multiple, use the first one and log a warning.
	// TODO: should finding multiple be an error?
	pod := &pods.Items[0]
	if len(pods.Items) > 1 {
		glog.Infof("Warning: more than one pod for deployment %s; basing canary check on the first pod '%s'", deployutil.LabelForDeployment(deployment), pod.Name)
	}

	// Make a pod store to poll and ensure it gets cleaned up.
	podStore, stopStore := c.getPodStore(pod.Namespace, pod.Name)
	defer close(stopStore)

	// Track container readiness based on those defined in the spec.
	observedContainers := map[string]bool{}
	for _, container := range pod.Spec.Containers {
		observedContainers[container.Name] = false
	}

	// Start checking for pod updates.
	glog.V(0).Infof("Waiting for pod %s/%s container readiness", pod.Namespace, pod.Name)
	err = wait.Poll(c.interval, c.timeout, func() (done bool, err error) {
		// Get the latest state of the pod.
		obj, exists, err := podStore.Get(pod)
		// Try again later on error or if the pod isn't available yet.
		if err != nil {
			glog.V(0).Infof("Error getting pod %s/%s to inspect container readiness: %v", pod.Namespace, pod.Name, err)
			return false, nil
		}
		if !exists {
			glog.V(0).Infof("Couldn't find pod %s/%s to inspect container readiness", pod.Namespace, pod.Name)
			return false, nil
		}
		// New pod state is available; update the observed ready status of any
		// containers.
		updatedPod := obj.(*kapi.Pod)
		for _, status := range updatedPod.Status.ContainerStatuses {
			// Ignore any containers which aren't defined in the deployment spec.
			if _, known := observedContainers[status.Name]; !known {
				glog.V(0).Infof("Ignoring readiness of container %s in pod %s/%s because it's not present in the pod spec", status.Name, pod.Namespace, pod.Name)
				continue
			}
			// The status of the container could be transient; we only care if it
			// was ever ready. If it was ready and then became not ready, we
			// consider it ready.
			if status.Ready {
				observedContainers[status.Name] = true
			}
		}
		// Check whether all containers have been observed as ready.
		allReady := true
		for _, ready := range observedContainers {
			if !ready {
				allReady = false
				break
			}
		}
		// If all containers have been ready once, return success.
		if allReady {
			glog.V(0).Infof("All containers ready for %s/%s", pod.Namespace, pod.Name)
			return true, nil
		}
		// Otherwise, try again later.
		glog.V(4).Infof("Still waiting for pod %s/%s container readiness; observed statuses: #%v", pod.Namespace, pod.Name, observedContainers)
		return false, nil
	})

	if err != nil {
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for pod %s/%s containers to become ready", pod.Namespace, pod.Name)
		}
		return fmt.Errorf("pod %s/%s failed readiness check: %v", pod.Namespace, pod.Name, err)
	}

	return nil
}
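The acceptor's readiness loop is built on wait.Poll, which invokes the condition function every interval until it returns true, returns an error, or the timeout elapses (surfacing wait.ErrWaitTimeout). A standalone sketch of the same pattern with an illustrative pollUntilReady helper:

// pollUntilReady is a sketch of the wait.Poll pattern used above: retry a
// readiness check on a fixed interval until it succeeds or the timeout hits.
func pollUntilReady(interval, timeout time.Duration, check func() bool) error {
	err := wait.Poll(interval, timeout, func() (bool, error) {
		// (false, nil) means "not done yet; poll again after the interval".
		return check(), nil
	})
	if err == wait.ErrWaitTimeout {
		return fmt.Errorf("timed out after %v waiting for readiness", timeout)
	}
	return err
}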