Example No. 1
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = config.Spec.Replicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", deployutil.LabelForDeploymentConfig(config), deployutil.LabelForDeployment(&deployment))
			newReplicaCount = 0
		}

		// Only update if necessary.
		var copied *kapi.ReplicationController
		if newReplicaCount != oldReplicaCount {
			if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				copied, err = deployutil.DeploymentDeepCopy(rc)
				if err != nil {
					glog.V(2).Infof("Deep copy of deployment %q failed: %v", rc.Name, err)
					return err
				}
				copied.Spec.Replicas = newReplicaCount
				copied, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "ReplicationControllerScaleFailed",
					"Failed to scale replication controler %q from %d to %d: %v", deployment.Name, oldReplicaCount, newReplicaCount, err)
				return err
			}

			c.recorder.Eventf(config, kapi.EventTypeNormal, "ReplicationControllerScaled", "Scaled replication controller %q from %d to %d", copied.Name, oldReplicaCount, newReplicaCount)
			toAppend = *copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(updatedDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "ReplicationControllerCleanupFailed", "Couldn't clean up replication controllers: %v", err)
	}

	return c.updateStatus(config, updatedDeployments)
}
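
The retry loop above re-reads the replication controller on every attempt so
that each Update call carries a fresh resourceVersion. A minimal sketch of that
pattern in isolation (scaleRC is a hypothetical helper; kclient is the same
client package the controller itself uses):

func scaleRC(client kclient.Interface, namespace, name string, replicas int32) error {
	return kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
		// Re-read the latest object so the update carries a fresh
		// resourceVersion; retrying with a stale copy would conflict forever.
		rc, err := client.ReplicationControllers(namespace).Get(name)
		if err != nil {
			return err
		}
		rc.Spec.Replicas = replicas
		// A Conflict error returned here triggers another attempt with backoff.
		_, err = client.ReplicationControllers(namespace).Update(rc)
		return err
	})
}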
Example No. 2
// work dequeues a single key from the queue, resolves it to a replication
// controller, and hands a deep copy of it to Handle. It returns true only
// when the queue has been shut down, signalling the worker loop to exit.
func (c *DeploymentController) work() bool {
	key, quit := c.queue.Get()
	if quit {
		return true
	}
	defer c.queue.Done(key)

	rc, err := c.getByKey(key.(string))
	if err != nil {
		glog.Error(err.Error())
	}

	if rc == nil {
		return false
	}

	copied, err := deployutil.DeploymentDeepCopy(rc)
	if err != nil {
		glog.Error(err.Error())
		return false
	}

	err = c.Handle(copied)
	c.handleErr(err, key, copied)

	return false
}
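
work is meant to be spun in a loop by the controller's worker goroutines; it
returns true only once the queue has been shut down. A hedged sketch of that
surrounding loop (the runWorker name is hypothetical; the real wiring lives
in factory.go):

func (c *DeploymentController) runWorker() {
	// Drain the queue until work reports that the queue has been shut down.
	for {
		if quit := c.work(); quit {
			return
		}
	}
}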
Example No. 3
// Handle implements the loop that processes deployment configs. Since this controller started
// using caches, the provided config MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentConfigController) Handle(config *deployapi.DeploymentConfig) error {
	// There's nothing to reconcile until the version is nonzero.
	if config.Status.LatestVersion == 0 {
		return c.updateStatus(config, []kapi.ReplicationController{})
	}

	// Find all deployments owned by the deployment config.
	selector := deployutil.ConfigSelector(config.Name)
	existingDeployments, err := c.rcStore.ReplicationControllers(config.Namespace).List(selector)
	if err != nil {
		return err
	}

	// In case the deployment config has been marked for deletion, merely update its status with
	// the latest available information. Some deletions may take some time to complete, so there
	// is value in doing this.
	if config.DeletionTimestamp != nil {
		return c.updateStatus(config, existingDeployments)
	}

	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	// If the latest deployment doesn't exist yet, cancel any running
	// deployments to allow them to be superseded by the new config version.
	awaitingCancellations := false
	if !latestIsDeployed {
		for i := range existingDeployments {
			deployment := existingDeployments[i]
			// Skip deployments with an outcome.
			if deployutil.IsTerminatedDeployment(&deployment) {
				continue
			}
			// Cancel running deployments.
			awaitingCancellations = true
			if !deployutil.IsDeploymentCancelled(&deployment) {

				// Retry faster on conflicts
				var updatedDeployment *kapi.ReplicationController
				if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
					rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
					if kapierrors.IsNotFound(err) {
						return nil
					}
					if err != nil {
						return err
					}
					copied, err := deployutil.DeploymentDeepCopy(rc)
					if err != nil {
						return err
					}
					copied.Annotations[deployapi.DeploymentCancelledAnnotation] = deployapi.DeploymentCancelledAnnotationValue
					copied.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentCancelledNewerDeploymentExists
					updatedDeployment, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
					return err
				}); err != nil {
					c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCancellationFailed", "Failed to cancel deployment %q superceded by version %d: %s", deployment.Name, config.Status.LatestVersion, err)
				} else {
					if updatedDeployment != nil {
						// replace the current deployment with the updated copy so that a future update has a chance at working
						existingDeployments[i] = *updatedDeployment
						c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCancelled", "Cancelled deployment %q superceded by version %d", deployment.Name, config.Status.LatestVersion)
					}
				}
			}
		}
	}
	// Wait for deployment cancellations before reconciling or creating a new
	// deployment to avoid competing with existing deployment processes.
	if awaitingCancellations {
		c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentAwaitingCancellation", "Deployment of version %d awaiting cancellation of older running deployments", config.Status.LatestVersion)
		return fmt.Errorf("found previous inflight deployment for %s - requeuing", deployutil.LabelForDeploymentConfig(config))
	}
	// If the latest deployment already exists, reconcile existing deployments
	// and return early.
	if latestIsDeployed {
		// If the latest deployment is still running, try again later. We don't
		// want to compete with the deployer.
		if !deployutil.IsTerminatedDeployment(latestDeployment) {
			return c.updateStatus(config, existingDeployments)
		}

		return c.reconcileDeployments(existingDeployments, config)
	}
	// If the config is paused we shouldn't create new deployments for it.
	if config.Spec.Paused {
		// in order for revision history limit cleanup to work for paused
		// deployments, we need to trigger it here
		if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
			c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
		}

		return c.updateStatus(config, existingDeployments)
	}
	// No deployments are running and the latest deployment doesn't exist, so
	// create the new deployment.
	deployment, err := deployutil.MakeDeployment(config, c.codec)
	if err != nil {
		return fatalError(fmt.Sprintf("couldn't make deployment from (potentially invalid) deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err))
	}
	created, err := c.rn.ReplicationControllers(config.Namespace).Create(deployment)
	if err != nil {
		// If the deployment was already created, just move on. The cache could be
		// stale, or another process could have already handled this update.
		if kapierrors.IsAlreadyExists(err) {
			return c.updateStatus(config, existingDeployments)
		}
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCreationFailed", "Couldn't deploy version %d: %s", config.Status.LatestVersion, err)
		return fmt.Errorf("couldn't create deployment for deployment config %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentCreated", "Created new deployment %q for version %d", created.Name, config.Status.LatestVersion)

	// As we've just created a new deployment, we need to make sure to clean
	// up old deployments if we have reached our deployment history quota
	existingDeployments = append(existingDeployments, *created)
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	return c.updateStatus(config, existingDeployments)
}
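
Cancellation in Handle is expressed entirely through annotations on the
replication controller: the cancel path writes the annotations, and
subsequent passes skip already-cancelled deployments via
deployutil.IsDeploymentCancelled. A minimal sketch of what such an
annotation-based predicate could look like (an illustrative assumption, not
the actual deployutil source):

func isCancelled(rc *kapi.ReplicationController) bool {
	// The cancel path above sets this annotation to the sentinel value;
	// the predicate only needs to check for it.
	value, ok := rc.Annotations[deployapi.DeploymentCancelledAnnotation]
	return ok && value == deployapi.DeploymentCancelledAnnotationValue
}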
Example No. 4
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
//
// Previously, scaling behavior was that the config replica count was used
// only for initial deployments and the active deployment had to be scaled up
// directly. To continue supporting that old behavior we must detect when the
// deployment has been directly manipulated, and if so, preserve the directly
// updated value and sync the config with the deployment.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	if !latestIsDeployed {
		// We shouldn't be reconciling if the latest deployment hasn't been
		// created; this is enforced on the calling side, but double checking
		// can't hurt.
		return c.updateStatus(config, existingDeployments)
	}
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)
	// Compute the replica count for the active deployment (even if the active
	// deployment doesn't exist). The active replica count is the value that
	// should be assigned to the config, to allow the replica propagation to
	// flow downward from the config.
	//
	// By default we'll assume the config replicas should be used to update the
	// active deployment except in special cases (like the first sync or externally
	// updated deployments).
	activeReplicas := config.Spec.Replicas
	source := "the deploymentConfig itself (no change)"

	activeDeploymentExists := activeDeployment != nil
	activeDeploymentIsLatest := activeDeploymentExists && activeDeployment.Name == latestDeployment.Name
	latestDesiredReplicas, latestHasDesiredReplicas := deployutil.DeploymentDesiredReplicas(latestDeployment)

	switch {
	case activeDeploymentExists && activeDeploymentIsLatest:
		// The active/latest deployment follows the config unless this is its first
		// sync or if an external change to the deployment replicas is detected.
		lastActiveReplicas, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if !hasLastActiveReplicas || lastActiveReplicas != activeDeployment.Spec.Replicas {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the latest/active deployment %q which was scaled directly or has not previously been synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case activeDeploymentExists && !activeDeploymentIsLatest:
		// The active/non-latest deployment follows the config if it was
		// previously synced; if this is the first sync, infer what the config
		// value should be based on either the latest desired or whatever the
		// deployment is currently scaled to.
		_, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if hasLastActiveReplicas {
			break
		}
		if latestHasDesiredReplicas {
			activeReplicas = latestDesiredReplicas
			source = fmt.Sprintf("the desired replicas of latest deployment %q which has not been previously synced", deployutil.LabelForDeployment(latestDeployment))
		} else if activeDeployment.Spec.Replicas > 0 {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the active deployment %q which has not been previously synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case !activeDeploymentExists && latestHasDesiredReplicas:
		// If there's no active deployment, use the latest desired, if available.
		activeReplicas = latestDesiredReplicas
		source = fmt.Sprintf("the desired replicas of latest deployment %q with no active deployment", deployutil.LabelForDeployment(latestDeployment))
	}

	// Bring the config in sync with the deployment. Once we know the config
	// accurately represents the desired replica count of the active deployment,
	// we can safely reconcile deployments.
	//
	// If the deployment config is a test config, never update it based on
	// deployments, since test behavior overrides user scaling.
	switch {
	case config.Spec.Replicas == activeReplicas:
	case config.Spec.Test:
		glog.V(4).Infof("Detected changed replicas for test deploymentConfig %q, ignoring that change", deployutil.LabelForDeploymentConfig(config))
	default:
		copied, err := deployutil.DeploymentConfigDeepCopy(config)
		if err != nil {
			return err
		}
		oldReplicas := copied.Spec.Replicas
		copied.Spec.Replicas = activeReplicas
		config, err = c.dn.DeploymentConfigs(copied.Namespace).Update(copied)
		if err != nil {
			return err
		}
		glog.V(4).Infof("Synced deploymentConfig %q replicas from %d to %d based on %s", deployutil.LabelForDeploymentConfig(config), oldReplicas, activeReplicas, source)
	}

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = activeReplicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", deployutil.LabelForDeploymentConfig(config), deployutil.LabelForDeployment(&deployment))
			newReplicaCount = 0
		}
		lastReplicas, hasLastReplicas := deployutil.DeploymentReplicas(&deployment)
		// Only update if necessary.
		var copied *kapi.ReplicationController
		if !hasLastReplicas || newReplicaCount != oldReplicaCount || lastReplicas != newReplicaCount {
			if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				copied, err = deployutil.DeploymentDeepCopy(rc)
				if err != nil {
					glog.V(2).Infof("Deep copy of deployment %q failed: %v", rc.Name, err)
					return err
				}
				copied.Spec.Replicas = newReplicaCount
				copied.Annotations[deployapi.DeploymentReplicasAnnotation] = strconv.Itoa(int(newReplicaCount))
				// Keep the server-returned object so toAppend below carries
				// the updated resourceVersion.
				copied, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentScaleFailed",
					"Failed to scale deployment %q from %d to %d: %v", deployment.Name, oldReplicaCount, newReplicaCount, err)
				return err
			}

			// Only report scaling events if we changed the replica count.
			if oldReplicaCount != newReplicaCount {
				c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentScaled",
					"Scaled deployment %q from %d to %d", copied.Name, oldReplicaCount, newReplicaCount)
			} else {
				glog.V(4).Infof("Updated deployment %q replica annotation to match current replica count %d", deployutil.LabelForDeployment(copied), newReplicaCount)
			}
			toAppend = *copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	return c.updateStatus(config, updatedDeployments)
}
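
External scaling in this version is detected by comparing Spec.Replicas with
the count the controller last recorded in the replicas annotation (written
above via strconv.Itoa). A minimal sketch of the matching read side (an
illustrative assumption about deployutil.DeploymentReplicas, not its actual
source):

func lastSyncedReplicas(rc *kapi.ReplicationController) (int32, bool) {
	raw, ok := rc.Annotations[deployapi.DeploymentReplicasAnnotation]
	if !ok {
		// The controller has never synced this deployment.
		return 0, false
	}
	n, err := strconv.Atoi(raw)
	if err != nil {
		return 0, false
	}
	return int32(n), true
}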
Example No. 5
// Handle processes a deployment and either creates a deployer pod or responds
// to a terminal deployment status. Since this controller started using caches,
// the provided rc MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	// Copy all the annotations from the deployment.
	updatedAnnotations := make(map[string]string)
	for key, value := range deployment.Annotations {
		updatedAnnotations[key] = value
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
	deployer, deployerErr := c.getPod(deployment.Namespace, deployerPodName)
	if deployerErr == nil {
		nextStatus = c.nextStatus(deployer, deployment, updatedAnnotations)
	}

	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod.
		// Instead try to delete any deployer pods found and transition the
		// deployment to Pending so that the deployment config controller
		// continues to see the deployment as in-flight. Eventually the deletion
		// of the deployer pod should cause a requeue of this deployment and
		// then it can be transitioned to Failed by this controller.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusPending
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
			break
		}

		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're
		// re-entering. Ensure that the pod is the one we created by verifying
		// the annotation on it; if fetching it fails for any reason other than
		// not-found, return a retryable error.
		if deployerErr != nil && !kerrors.IsNotFound(deployerErr) {
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), deployerErr)
		}
		if deployerErr == nil && deployer != nil {
			// Do a stronger check to validate that the existing deployer pod is
			// actually for this deployment, and if not, fail this deployment.
			//
			// TODO: Investigate checking the container image of the running pod and
			// comparing with the intended deployer pod image. If we do so, we'll need
			// to ensure that changes to 'unrelated' pods don't result in updates to
			// the deployment. So, the image check will have to be done in other areas
			// of the code as well.
			if deployutil.DeploymentNameFor(deployer) != deployment.Name {
				nextStatus = deployapi.DeploymentStatusFailed
				updatedAnnotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "FailedCreate", fmt.Sprintf("Error creating deployer pod since another pod with the same name (%q) exists", deployer.Name))
				glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), deployer.Name)
			} else {
				// Update to pending or to the appropriate status relative to the existing validated deployer pod.
				updatedAnnotations[deployapi.DeploymentPodAnnotation] = deployer.Name
				nextStatus = nextStatusComp(nextStatus, deployapi.DeploymentStatusPending)
				glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", deployer.Name, deployutil.LabelForDeployment(deployment))
			}
			// Don't try and re-create the deployer pod.
			break
		}

		if _, ok := deployment.Annotations[deployapi.DeploymentIgnorePodAnnotation]; ok {
			return nil
		}

		// Generate a deployer pod spec.
		deployerPod, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		// Create the deployer pod.
		deploymentPod, err := c.pn.Pods(deployment.Namespace).Create(deployerPod)
		// Retry on error.
		if err != nil {
			return actionableError(fmt.Sprintf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		updatedAnnotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Created deployer pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))

	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		switch {
		case kerrors.IsNotFound(deployerErr):
			nextStatus = deployapi.DeploymentStatusFailed
			// If the deployment is cancelled here then we deleted the deployer in a previous
			// resync of the deployment.
			if !deployutil.IsDeploymentCancelled(deployment) {
				updatedAnnotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "Failed", fmt.Sprintf("Deployer pod %q has gone missing", deployerPodName))
				deployerErr = fmt.Errorf("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
				utilruntime.HandleError(deployerErr)
			}

		case deployerErr != nil:
			// We'll try again later on resync. Continue to process cancellations.
			deployerErr = fmt.Errorf("Error getting deployer pod %q for deployment %q: %v", deployerPodName, deployutil.LabelForDeployment(deployment), deployerErr)
			utilruntime.HandleError(deployerErr)

		default: /* deployerErr == nil */
			// If the deployment has been cancelled, delete any deployer pods
			// found and transition the deployment to Pending so that the
			// deployment config controller continues to see the deployment
			// as in-flight. Eventually the deletion of the deployer pod should
			// cause a requeue of this deployment and then it can be transitioned
			// to Failed by this controller.
			if deployutil.IsDeploymentCancelled(deployment) {
				if err := c.cleanupDeployerPods(deployment); err != nil {
					return err
				}
			}
		}

	case deployapi.DeploymentStatusFailed:
		// Try once more to clean up a cancelled deployment's pods, in case hook
		// pods were created just after we issued the first cleanup request.
		if deployutil.IsDeploymentCancelled(deployment) {
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
		}

	case deployapi.DeploymentStatusComplete:
		if err := c.cleanupDeployerPods(deployment); err != nil {
			return err
		}
	}

	// Update only if we need to transition to a new phase.
	if deployutil.CanTransitionPhase(currentStatus, nextStatus) {
		deployment, err := deployutil.DeploymentDeepCopy(deployment)
		if err != nil {
			return err
		}

		updatedAnnotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		deployment.Annotations = updatedAnnotations

		// if we are going to transition to failed or complete and scale is non-zero, we'll check one more
		// time to see if we are a test deployment to guarantee that we maintain the test invariant.
		if deployment.Spec.Replicas != 0 && deployutil.IsTerminatedDeployment(deployment) {
			if config, err := deployutil.DecodeDeploymentConfig(deployment, c.codec); err == nil && config.Spec.Test {
				deployment.Spec.Replicas = 0
			}
		}

		if _, err := c.rn.ReplicationControllers(deployment.Namespace).Update(deployment); err != nil {
			return fmt.Errorf("couldn't update deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s (scale: %d)", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus, deployment.Spec.Replicas)
	}
	return nil
}
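
The final update is gated by deployutil.CanTransitionPhase, which treats
deployment phases as a forward-only state machine. A hedged sketch of such a
guard (the exact edges the real helper allows may differ):

// Phases ordered by progression; Complete and Failed are both terminal.
var phaseOrder = map[deployapi.DeploymentStatus]int{
	deployapi.DeploymentStatusNew:      0,
	deployapi.DeploymentStatusPending:  1,
	deployapi.DeploymentStatusRunning:  2,
	deployapi.DeploymentStatusComplete: 3,
	deployapi.DeploymentStatusFailed:   3,
}

func canTransition(current, next deployapi.DeploymentStatus) bool {
	if current == deployapi.DeploymentStatusComplete ||
		current == deployapi.DeploymentStatusFailed {
		return false // terminal phases never transition again
	}
	return phaseOrder[next] > phaseOrder[current]
}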