Code Example #1
File: controller.go  Project: rootfs/origin
// updateStatus recomputes the status for config from its deployments and
// writes it back only when it differs from the currently stored status.
func (c *DeploymentConfigController) updateStatus(config *deployapi.DeploymentConfig, deployments []kapi.ReplicationController) error {
	newStatus, err := c.calculateStatus(*config, deployments)
	if err != nil {
		glog.V(2).Infof("Cannot calculate the status for %q: %v", deployutil.LabelForDeploymentConfig(config), err)
		return err
	}

	latestExists, latestRC := deployutil.LatestDeploymentInfo(config, deployments)
	if !latestExists {
		latestRC = nil
	}
	updateConditions(config, &newStatus, latestRC)

	// NOTE: We should update the status of the deployment config only if we need to, otherwise
	// we hotloop between updates.
	if reflect.DeepEqual(newStatus, config.Status) {
		return nil
	}

	copied, err := deployutil.DeploymentConfigDeepCopy(config)
	if err != nil {
		return err
	}

	copied.Status = newStatus
	if _, err := c.dn.DeploymentConfigs(copied.Namespace).UpdateStatus(copied); err != nil {
		glog.V(2).Infof("Cannot update the status for %q: %v", deployutil.LabelForDeploymentConfig(copied), err)
		return err
	}
	glog.V(4).Infof("Updated the status for %q (observed generation: %d)", deployutil.LabelForDeploymentConfig(copied), copied.Status.ObservedGeneration)
	return nil
}
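The pattern above computes a fresh status, compares it against the stored status with reflect.DeepEqual so that no-op writes are skipped (avoiding a hot loop of self-triggered updates), and only then deep-copies the config and calls UpdateStatus. Below is a minimal, self-contained sketch of that guard, assuming a hypothetical Status type and write function; it is not the origin code.

package main

import (
	"fmt"
	"reflect"
)

// Status is a hypothetical, trimmed-down stand-in for a controller status.
type Status struct {
	Replicas           int32
	ObservedGeneration int64
}

// updateStatus writes newStatus only when it differs from *current, mirroring
// the DeepEqual check that keeps the controller from hot-looping on updates.
func updateStatus(current *Status, newStatus Status, write func(Status) error) error {
	if reflect.DeepEqual(newStatus, *current) {
		return nil // nothing changed; skip the API call
	}
	if err := write(newStatus); err != nil {
		return err
	}
	*current = newStatus
	return nil
}

func main() {
	cur := &Status{Replicas: 3, ObservedGeneration: 7}
	write := func(s Status) error { fmt.Println("writing status:", s); return nil }
	_ = updateStatus(cur, Status{Replicas: 3, ObservedGeneration: 7}, write) // skipped
	_ = updateStatus(cur, Status{Replicas: 5, ObservedGeneration: 8}, write) // written
}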
Code Example #2
File: factory.go  Project: juanvallejo/origin
// work dequeues a single key, resolves it to a deployment config, and hands a
// deep copy to Handle; it returns true only when the queue has been shut down.
func (c *DeploymentConfigController) work() bool {
	key, quit := c.queue.Get()
	if quit {
		return true
	}
	defer c.queue.Done(key)

	dc, err := c.getByKey(key.(string))
	if err != nil {
		glog.Error(err.Error())
	}

	if dc == nil {
		return false
	}

	copied, err := deployutil.DeploymentConfigDeepCopy(dc)
	if err != nil {
		glog.Error(err.Error())
		return false
	}

	err = c.Handle(copied)
	c.handleErr(err, key)

	return false
}
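work() follows the common queue-worker convention: it returns true only when the queue reports shutdown, so the caller is expected to invoke it in a loop until it does. A minimal sketch of such a driver is below, using a plain channel in place of the real work queue; the Runner type and its methods are illustrative stand-ins, not the origin API.

package main

import (
	"fmt"
	"sync"
)

// Runner is a hypothetical stand-in for a controller with a work queue.
type Runner struct {
	tasks chan string
	wg    sync.WaitGroup
}

// work handles one key and reports true when the queue has been shut down,
// matching the "return true to stop" convention of the example above.
func (r *Runner) work() bool {
	key, ok := <-r.tasks
	if !ok {
		return true // queue closed: tell the caller to stop
	}
	fmt.Println("handling", key)
	return false
}

// Run starts the requested number of workers, each looping until work()
// signals shutdown.
func (r *Runner) Run(workers int) {
	for i := 0; i < workers; i++ {
		r.wg.Add(1)
		go func() {
			defer r.wg.Done()
			for !r.work() {
			}
		}()
	}
}

func main() {
	r := &Runner{tasks: make(chan string, 2)}
	r.tasks <- "default/frontend"
	r.tasks <- "default/backend"
	close(r.tasks)
	r.Run(2)
	r.wg.Wait()
}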
Code Example #3
File: controller.go  Project: Xmagicer/origin
// Handle processes deployment triggers for a deployment config.
func (c *DeploymentTriggerController) Handle(config *deployapi.DeploymentConfig) error {
	if len(config.Spec.Triggers) == 0 || config.Spec.Paused {
		return nil
	}

	// Try to decode this deployment config from the encoded annotation found in
	// its latest deployment.
	decoded, err := c.decodeFromLatest(config)
	if err != nil {
		return err
	}

	canTrigger, causes := canTrigger(config, decoded)

	// Return if we cannot trigger a new deployment.
	if !canTrigger {
		return nil
	}

	copied, err := deployutil.DeploymentConfigDeepCopy(config)
	if err != nil {
		return err
	}

	return c.update(copied, causes)
}
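Like the other handlers here, the trigger controller never mutates the object it was handed (which typically comes from a shared informer cache); it deep-copies first and passes only the copy on for update. The sketch below isolates that habit with a hypothetical Config type and deepCopy helper; it is not the origin deployutil code.

package main

import "fmt"

// Config is a hypothetical, trimmed-down stand-in for a cached API object.
type Config struct {
	Name     string
	Replicas int32
}

// deepCopy returns an independent copy. Config has no reference fields, so a
// value copy is sufficient; real API objects need a generated deep copy.
func deepCopy(c *Config) *Config {
	copied := *c
	return &copied
}

func main() {
	cached := &Config{Name: "frontend", Replicas: 2} // pretend this came from a shared cache
	copied := deepCopy(cached)
	copied.Replicas = 5 // mutate only the copy
	fmt.Println("cache still sees", cached.Replicas, "- update payload carries", copied.Replicas)
}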
Code Example #4
File: controller.go  Project: rootfs/origin
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
//
// Previously, scaling behavior was that the config replica count was used
// only for initial deployments and the active deployment had to be scaled up
// directly. To continue supporting that old behavior we must detect when the
// deployment has been directly manipulated, and if so, preserve the directly
// updated value and sync the config with the deployment.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	if !latestIsDeployed {
		// We shouldn't be reconciling if the latest deployment hasn't been
		// created; this is enforced on the calling side, but double checking
		// can't hurt.
		return c.updateStatus(config, existingDeployments)
	}
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)
	// Compute the replica count for the active deployment (even if the active
	// deployment doesn't exist). The active replica count is the value that
	// should be assigned to the config, to allow the replica propagation to
	// flow downward from the config.
	//
	// By default we'll assume the config replicas should be used to update the
	// active deployment except in special cases (like first sync or externally
	// updated deployments.)
	activeReplicas := config.Spec.Replicas
	source := "the deploymentConfig itself (no change)"

	activeDeploymentExists := activeDeployment != nil
	activeDeploymentIsLatest := activeDeploymentExists && activeDeployment.Name == latestDeployment.Name
	latestDesiredReplicas, latestHasDesiredReplicas := deployutil.DeploymentDesiredReplicas(latestDeployment)

	switch {
	case activeDeploymentExists && activeDeploymentIsLatest:
		// The active/latest deployment follows the config unless this is its first
		// sync or if an external change to the deployment replicas is detected.
		lastActiveReplicas, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if !hasLastActiveReplicas || lastActiveReplicas != activeDeployment.Spec.Replicas {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the latest/active deployment %q which was scaled directly or has not previously been synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case activeDeploymentExists && !activeDeploymentIsLatest:
		// The active/non-latest deployment follows the config if it was
		// previously synced; if this is the first sync, infer what the config
		// value should be based on either the latest desired or whatever the
		// deployment is currently scaled to.
		_, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if hasLastActiveReplicas {
			break
		}
		if latestHasDesiredReplicas {
			activeReplicas = latestDesiredReplicas
			source = fmt.Sprintf("the desired replicas of latest deployment %q which has not been previously synced", deployutil.LabelForDeployment(latestDeployment))
		} else if activeDeployment.Spec.Replicas > 0 {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the active deployment %q which has not been previously synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case !activeDeploymentExists && latestHasDesiredReplicas:
		// If there's no active deployment, use the latest desired, if available.
		activeReplicas = latestDesiredReplicas
		source = fmt.Sprintf("the desired replicas of latest deployment %q with no active deployment", deployutil.LabelForDeployment(latestDeployment))
	}

	// Bring the config in sync with the deployment. Once we know the config
	// accurately represents the desired replica count of the active deployment,
	// we can safely reconcile deployments.
	//
	// If the deployment config is test, never update the deployment config based
	// on deployments, since test behavior overrides user scaling.
	switch {
	case config.Spec.Replicas == activeReplicas:
	case config.Spec.Test:
		glog.V(4).Infof("Detected changed replicas for test deploymentConfig %q, ignoring that change", deployutil.LabelForDeploymentConfig(config))
	default:
		copied, err := deployutil.DeploymentConfigDeepCopy(config)
		if err != nil {
			return err
		}
		oldReplicas := copied.Spec.Replicas
		copied.Spec.Replicas = activeReplicas
		config, err = c.dn.DeploymentConfigs(copied.Namespace).Update(copied)
		if err != nil {
			return err
		}
		glog.V(4).Infof("Synced deploymentConfig %q replicas from %d to %d based on %s", deployutil.LabelForDeploymentConfig(config), oldReplicas, activeReplicas, source)
	}

	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment

		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name

		oldReplicaCount := deployment.Spec.Replicas
		newReplicaCount := int32(0)
		if isActiveDeployment {
			newReplicaCount = activeReplicas
		}
		if config.Spec.Test {
			glog.V(4).Infof("Deployment config %q is test and deployment %q will be scaled down", deployutil.LabelForDeploymentConfig(config), deployutil.LabelForDeployment(&deployment))
			newReplicaCount = 0
		}
		lastReplicas, hasLastReplicas := deployutil.DeploymentReplicas(&deployment)
		// Only update if necessary.
		var copied *kapi.ReplicationController
		if !hasLastReplicas || newReplicaCount != oldReplicaCount || lastReplicas != newReplicaCount {
			if err := kclient.RetryOnConflict(kclient.DefaultBackoff, func() error {
				// refresh the replication controller version
				rc, err := c.rcStore.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
				if err != nil {
					return err
				}
				copied, err = deployutil.DeploymentDeepCopy(rc)
				if err != nil {
					glog.V(2).Infof("Deep copy of deployment %q failed: %v", rc.Name, err)
					return err
				}
				copied.Spec.Replicas = newReplicaCount
				copied.Annotations[deployapi.DeploymentReplicasAnnotation] = strconv.Itoa(int(newReplicaCount))
				_, err = c.rn.ReplicationControllers(copied.Namespace).Update(copied)
				return err
			}); err != nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentScaleFailed",
					"Failed to scale deployment %q from %d to %d: %v", deployment.Name, oldReplicaCount, newReplicaCount, err)
				return err
			}

			// Only report scaling events if we changed the replica count.
			if oldReplicaCount != newReplicaCount {
				c.recorder.Eventf(config, kapi.EventTypeNormal, "DeploymentScaled",
					"Scaled deployment %q from %d to %d", copied.Name, oldReplicaCount, newReplicaCount)
			} else {
				glog.V(4).Infof("Updated deployment %q replica annotation to match current replica count %d", deployutil.LabelForDeployment(copied), newReplicaCount)
			}
			toAppend = *copied
		}

		updatedDeployments = append(updatedDeployments, toAppend)
	}

	// As the deployment configuration has changed, we need to make sure to clean
	// up old deployments if we have now reached our deployment history quota
	if err := c.cleanupOldDeployments(existingDeployments, config); err != nil {
		c.recorder.Eventf(config, kapi.EventTypeWarning, "DeploymentCleanupFailed", "Couldn't clean up deployments: %v", err)
	}

	return c.updateStatus(config, updatedDeployments)
}
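The scaling step above wraps the replication controller update in kclient.RetryOnConflict, re-reading the latest version on each attempt so a stale resourceVersion does not abort the reconcile. A minimal, self-contained sketch of that retry-on-conflict shape follows; the errConflict sentinel and retryOnConflict helper are hand-rolled stand-ins, not the client library.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errConflict stands in for the "object has been modified" conflict error.
var errConflict = errors.New("conflict: object was modified")

// retryOnConflict retries fn up to steps times, but only when the failure is
// a conflict; any other error is returned immediately.
func retryOnConflict(steps int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < steps; i++ {
		if err = fn(); err == nil || !errors.Is(err, errConflict) {
			return err
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	attempts := 0
	err := retryOnConflict(5, 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errConflict // simulate losing the update race twice
		}
		fmt.Println("scaled replication controller on attempt", attempts)
		return nil
	})
	fmt.Println("final error:", err)
}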
Code Example #5
File: controller.go  Project: abhgupta/origin
// Handle processes image change triggers associated with an image stream.
func (c *ImageChangeController) Handle(stream *imageapi.ImageStream) error {
	configs, err := c.dcLister.GetConfigsForImageStream(stream)
	if err != nil {
		return fmt.Errorf("couldn't get list of deployment configs while handling image stream %q: %v", imageapi.LabelForStream(stream), err)
	}

	// Find any configs which should be updated based on the new image state
	var configsToUpdate []*deployapi.DeploymentConfig
	for n, config := range configs {
		glog.V(4).Infof("Detecting image changes for deployment config %q", deployutil.LabelForDeploymentConfig(config))
		hasImageChange := false

		for j := range config.Spec.Triggers {
			// because config can be copied during this loop, make sure we load from config for subsequent loops
			trigger := config.Spec.Triggers[j]
			params := trigger.ImageChangeParams

			// Only automatic image change triggers should fire
			if trigger.Type != deployapi.DeploymentTriggerOnImageChange {
				continue
			}

			// All initial deployments should have their images resolved in order to
			// be able to work and not try to pull non-existent images from DockerHub.
			// Deployments with automatic set to false that have been deployed at least
			// once shouldn't have their images updated.
			if (!params.Automatic || config.Spec.Paused) && len(params.LastTriggeredImage) > 0 {
				continue
			}

			// Check if the image stream matches the trigger
			if !triggerMatchesImage(config, params, stream) {
				continue
			}

			_, tag, ok := imageapi.SplitImageStreamTag(params.From.Name)
			if !ok {
				glog.Warningf("Invalid image stream tag %q in %q", params.From.Name, deployutil.LabelForDeploymentConfig(config))
				continue
			}

			// Find the latest tag event for the trigger tag
			latestEvent := imageapi.LatestTaggedImage(stream, tag)
			if latestEvent == nil {
				glog.V(5).Infof("Couldn't find latest tag event for tag %q in image stream %q", tag, imageapi.LabelForStream(stream))
				continue
			}

			// Ensure a change occurred
			if len(latestEvent.DockerImageReference) == 0 || latestEvent.DockerImageReference == params.LastTriggeredImage {
				glog.V(4).Infof("No image changes for deployment config %q were detected", deployutil.LabelForDeploymentConfig(config))
				continue
			}

			names := sets.NewString(params.ContainerNames...)
			for i := range config.Spec.Template.Spec.Containers {
				container := &config.Spec.Template.Spec.Containers[i]
				if !names.Has(container.Name) {
					continue
				}

				if !hasImageChange {
					// create a copy prior to mutation
					result, err := deployutil.DeploymentConfigDeepCopy(configs[n])
					if err != nil {
						utilruntime.HandleError(err)
						continue
					}
					configs[n] = result
					container = &configs[n].Spec.Template.Spec.Containers[i]
					params = configs[n].Spec.Triggers[j].ImageChangeParams
				}

				// Update the image
				container.Image = latestEvent.DockerImageReference
				// Log the last triggered image ID
				params.LastTriggeredImage = latestEvent.DockerImageReference
				hasImageChange = true
			}
		}

		if hasImageChange {
			configsToUpdate = append(configsToUpdate, configs[n])
		}
	}

	// Attempt to regenerate all configs which may contain image updates
	anyFailed := false
	for _, config := range configsToUpdate {
		if _, err := c.dn.DeploymentConfigs(config.Namespace).Update(config); err != nil {
			utilruntime.HandleError(err)
			anyFailed = true
		} else {
			glog.V(4).Infof("Updated deployment config %q for trigger on image stream %q",
				deployutil.LabelForDeploymentConfig(config), imageapi.LabelForStream(stream))
		}
	}

	if anyFailed {
		return fmt.Errorf("couldn't update some deployment configs for trigger on image stream %q", imageapi.LabelForStream(stream))
	}

	return nil
}
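The heart of Handle is deciding which triggers may fire: non-automatic (or paused) triggers that have already resolved an image are skipped, and a trigger only fires when the stream's latest image reference is non-empty and differs from the last triggered one. The sketch below isolates that decision with a hypothetical trimmed-down trigger type; it is not the origin ImageChangeParams.

package main

import "fmt"

// imageTrigger is a hypothetical, trimmed-down image change trigger.
type imageTrigger struct {
	Automatic          bool
	LastTriggeredImage string
}

// shouldFire mirrors the gating checks in Handle above.
func shouldFire(t imageTrigger, paused bool, latestImage string) bool {
	// Initial deployments still need an image resolved, but triggers that are
	// manual (or on a paused config) and already deployed are left alone.
	if (!t.Automatic || paused) && len(t.LastTriggeredImage) > 0 {
		return false
	}
	// Only fire when the image reference actually changed.
	if len(latestImage) == 0 || latestImage == t.LastTriggeredImage {
		return false
	}
	return true
}

func main() {
	t := imageTrigger{Automatic: false, LastTriggeredImage: "registry/app@sha256:old"}
	fmt.Println(shouldFire(t, false, "registry/app@sha256:new")) // false: manual and already deployed
	t.Automatic = true
	fmt.Println(shouldFire(t, false, "registry/app@sha256:new")) // true: automatic and image changed
}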