Example #1
// retry resets the status of the latest deployment to New, which will cause
// the deployment to be retried. An error is returned if the deployment is not
// currently in a failed state.
func (o *DeployOptions) retry(config *deployapi.DeploymentConfig, out io.Writer) error {
	if config.LatestVersion == 0 {
		return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name)
	}
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("Unable to find the latest deployment (#%d).\nYou can start a new deployment using the --latest option.", config.LatestVersion)
		}
		return err
	}

	if status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed {
		message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.LatestVersion, status)
		if status == deployapi.DeploymentStatusComplete {
			message += "You can start a new deployment using the --latest option."
		} else {
			message += "Optionally, you can cancel this deployment using the --cancel option."
		}

		return errors.New(message)
	}

	// Delete the deployer pod as well as the deployment hook pods, if any
	pods, err := o.kubeClient.Pods(config.Namespace).List(deployutil.DeployerPodSelector(deploymentName), fields.Everything())
	if err != nil {
		return fmt.Errorf("Failed to list deployer/hook pods for deployment #%d: %v", config.LatestVersion, err)
	}
	for _, pod := range pods.Items {
		err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
		if err != nil {
			return fmt.Errorf("Failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.LatestVersion, err)
		}
	}

	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	// clear out the cancellation flag as well as any previous status-reason annotation
	delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
	_, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
	if err == nil {
		fmt.Fprintf(out, "retried #%d\n", config.LatestVersion)
	}
	return err
}
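
All of these examples hinge on deployutil.DeployerPodSelector, which maps a deployment (replication controller) name to a label selector matching its deployer and hook pods. A minimal sketch of such a helper, assuming the pods carry a single well-known label; the label key below is shown for illustration only and is not confirmed by these snippets:

// DeployerPodSelector returns a label selector that matches the deployer
// and hook pods belonging to the named deployment. The label key is an
// assumption for illustration; the real constant lives in the deploy API.
// labels here is k8s.io/kubernetes/pkg/labels in this API generation.
func DeployerPodSelector(name string) labels.Selector {
	return labels.SelectorFromSet(labels.Set{"openshift.io/deployer-pod-for.name": name})
}
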
Example #2
func (p *deploymentDeleter) DeleteDeployment(deployment *kapi.ReplicationController) error {
	glog.V(4).Infof("Deleting deployment %q", deployment.Name)
	// If the deployment failed, we need to remove its deployer pods, too.
	if deployutil.IsFailedDeployment(deployment) {
		dpSelector := deployutil.DeployerPodSelector(deployment.Name)
		deployers, err := p.pods.Pods(deployment.Namespace).List(kapi.ListOptions{LabelSelector: dpSelector})
		if err != nil {
			glog.Warning("Cannot list deployer pods for %q: %v\n", deployment.Name, err)
		} else {
			for _, pod := range deployers.Items {
				if err := p.pods.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
					glog.Warning("Cannot remove deployer pod %q: %v\n", pod.Name, err)
				}
			}
		}
	}
	return p.deployments.ReplicationControllers(deployment.Namespace).Delete(deployment.Name, nil)
}
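
IsFailedDeployment is used here without its definition, but Example #9 below performs the same check inline by comparing the deployment's status annotation to the Failed phase, so a faithful sketch is:

// IsFailedDeployment reports whether the deployment (RC) ended in the
// Failed phase, matching the inline annotation check in Example #9.
func IsFailedDeployment(deployment *kapi.ReplicationController) bool {
	return DeploymentStatusFor(deployment) == deployapi.DeploymentStatusFailed
}
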
Example #3
func (c *DeploymentController) cleanupDeployerPods(deployment *kapi.ReplicationController) error {
	selector := deployutil.DeployerPodSelector(deployment.Name)
	deployerList, err := c.podStore.Pods(deployment.Namespace).List(selector)
	if err != nil {
		return fmt.Errorf("couldn't fetch deployer pods for %q: %v", deployutil.LabelForDeployment(deployment), err)
	}

	cleanedAll := true
	for _, deployerPod := range deployerList.Items {
		if err := c.pn.Pods(deployerPod.Namespace).Delete(deployerPod.Name, &kapi.DeleteOptions{}); err != nil && !kerrors.IsNotFound(err) {
			// If the pod deletion failed, log the error and continue; we will try
			// to delete any remaining deployer pods and return an error afterwards.
			utilruntime.HandleError(fmt.Errorf("couldn't delete completed deployer pod %q for deployment %q: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
			cleanedAll = false
		}
	}

	if !cleanedAll {
		return actionableError(fmt.Sprintf("couldn't clean up all deployer pods for %s", deployment.Name))
	}
	return nil
}
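
actionableError is not defined in the snippet; since it is constructed from a plain formatted string, one plausible definition is a named string type that implements error:

// actionableError is an error the caller is expected to act on
// (hypothetical definition, inferred only from its usage above).
type actionableError string

func (e actionableError) Error() string { return string(e) }
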
Example #4
// retry resets the status of the latest deployment to New, which will cause
// the deployment to be retried. An error is returned if the deployment is not
// currently in a failed state.
func (o DeployOptions) retry(config *deployapi.DeploymentConfig) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot retry a paused deployment config")
	}
	if config.Status.LatestVersion == 0 {
		return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name)
	}
	// TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently,
	// that's the case because clients (oc, trigger controllers) are updating the status directly.
	// Clients should be acting either on spec or on annotations and status updates should be a
	// responsibility of the main controller. We need to start by unplugging this assumption from
	// our client tools.
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("unable to find the latest deployment (#%d).\nYou can start a new deployment with 'oc deploy --latest dc/%s'.", config.Status.LatestVersion, config.Name)
		}
		return err
	}

	if !deployutil.IsFailedDeployment(deployment) {
		message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.Status.LatestVersion, deployutil.DeploymentStatusFor(deployment))
		if deployutil.IsCompleteDeployment(deployment) {
			message += fmt.Sprintf("You can start a new deployment with 'oc deploy --latest dc/%s'.", config.Name)
		} else {
			message += fmt.Sprintf("Optionally, you can cancel this deployment with 'oc deploy --cancel dc/%s'.", config.Name)
		}

		return errors.New(message)
	}

	// Delete the deployer pod as well as the deployment hook pods, if any
	pods, err := o.kubeClient.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)})
	if err != nil {
		return fmt.Errorf("failed to list deployer/hook pods for deployment #%d: %v", config.Status.LatestVersion, err)
	}
	for _, pod := range pods.Items {
		err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
		if err != nil {
			return fmt.Errorf("failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.Status.LatestVersion, err)
		}
	}

	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	// clear out the cancellation flag as well as any previous status-reason annotation
	delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
	_, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
	if err != nil {
		return err
	}
	fmt.Fprintf(o.out, "Retried #%d\n", config.Status.LatestVersion)
	if o.follow {
		return o.getLogs(config)
	}
	fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, config.Name)
	return nil
}
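
The retry path writes the New phase into deployapi.DeploymentStatusAnnotation, so DeploymentStatusFor presumably reads the phase back out of the same annotation. A minimal sketch under that assumption:

// DeploymentStatusFor returns the deployment's phase as recorded in its
// status annotation (assumed to be the annotation the retry path writes).
func DeploymentStatusFor(deployment *kapi.ReplicationController) deployapi.DeploymentStatus {
	return deployapi.DeploymentStatus(deployment.Annotations[deployapi.DeploymentStatusAnnotation])
}
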
Example #5
// Stop scales a replication controller via its deployment configuration down to
// zero replicas, waits for all of them to get deleted and then deletes both the
// replication controller and its deployment configuration.
func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
	// Pause the deployment configuration to prevent new deployments from
	// being triggered.
	config, err := reaper.pause(namespace, name)
	configNotFound := kerrors.IsNotFound(err)
	if err != nil && !configNotFound {
		return err
	}

	var (
		isPaused bool
		legacy   bool
	)
	// Determine if the deployment config controller noticed the pause.
	if !configNotFound {
		if err := wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
			dc, err := reaper.oc.DeploymentConfigs(namespace).Get(name)
			if err != nil {
				return false, err
			}
			isPaused = dc.Spec.Paused
			return dc.Status.ObservedGeneration >= config.Generation, nil
		}); err != nil {
			return err
		}

		// If we failed to pause the deployment config, it means we are talking to
		// an old API that does not support pausing. In that case, we delete the
		// deployment config to stay backward compatible.
		if !isPaused {
			if err := reaper.oc.DeploymentConfigs(namespace).Delete(name); err != nil {
				return err
			}
			// Setting this to true avoids deleting the config at the end.
			legacy = true
		}
	}

	// Clean up deployments related to the config. Even if the deployment
	// configuration has been deleted, we want to sweep the existing replication
	// controllers and clean them up.
	options := kapi.ListOptions{LabelSelector: util.ConfigSelector(name)}
	rcList, err := reaper.kc.ReplicationControllers(namespace).List(options)
	if err != nil {
		return err
	}
	rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc)
	if err != nil {
		return err
	}

	// If there is neither a config nor any deployments, nor any deployer pods, we can return NotFound.
	deployments := rcList.Items

	if configNotFound && len(deployments) == 0 {
		return kerrors.NewNotFound(kapi.Resource("deploymentconfig"), name)
	}

	for _, rc := range deployments {
		if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			// Better not error out here...
			glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err)
		}

		// Only remove deployer pods when the deployment failed. For completed
		// deployments the pods should already be deleted.
		if !util.IsFailedDeployment(&rc) {
			continue
		}

		// Delete all deployer and hook pods
		options = kapi.ListOptions{LabelSelector: util.DeployerPodSelector(rc.Name)}
		podList, err := reaper.kc.Pods(rc.Namespace).List(options)
		if err != nil {
			return err
		}
		for _, pod := range podList.Items {
			err := reaper.kc.Pods(pod.Namespace).Delete(pod.Name, gracePeriod)
			if err != nil {
				// Better not error out here...
				glog.Infof("Cannot delete lifecycle Pod %s/%s for deployment config %s/%s: %v", pod.Namespace, pod.Name, namespace, name, err)
			}
		}
	}

	// Nothing to delete or we already deleted the deployment config because we
	// failed to pause.
	if configNotFound || legacy {
		return nil
	}

	return reaper.oc.DeploymentConfigs(namespace).Delete(name)
}
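
reaper.pause is not shown. Because the poll that follows checks dc.Spec.Paused and Status.ObservedGeneration, a plausible implementation simply flips Spec.Paused via an update. This is a sketch under that assumption, not the confirmed implementation; conflict retries are omitted for brevity:

// pause marks the deployment config as paused so that no new deployments
// are triggered while the reaper tears everything down (hypothetical sketch).
func (reaper *DeploymentConfigReaper) pause(namespace, name string) (*deployapi.DeploymentConfig, error) {
	config, err := reaper.oc.DeploymentConfigs(namespace).Get(name)
	if err != nil {
		return nil, err
	}
	config.Spec.Paused = true
	return reaper.oc.DeploymentConfigs(namespace).Update(config)
}
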
Example #6
// Create creates a DeployerPodController.
func (factory *DeployerPodControllerFactory) Create() controller.RunnableController {
	deploymentLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.ReplicationControllers(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	deploymentStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentLW, &kapi.ReplicationController{}, deploymentStore, 2*time.Minute).Run()

	// TODO: These should be filtered somehow to include only the primary
	// deployer pod. For now, the controller is filtering.
	// TODO: Even with the label selector, this is inefficient on the backend
	// and we should work to consolidate namespace-spanning pod watches. For
	// example, the build infra is also watching pods across namespaces.
	podLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.KubeClient.Pods(kapi.NamespaceAll).List(deployutil.AnyDeployerPodSelector(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.KubeClient.Pods(kapi.NamespaceAll).Watch(deployutil.AnyDeployerPodSelector(), fields.Everything(), resourceVersion)
		},
	}
	podQueue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(podLW, &kapi.Pod{}, podQueue, 2*time.Minute).Run()

	podController := &DeployerPodController{
		deploymentClient: &deploymentClientImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				// Try to use the cache first. Trust hits and return them.
				example := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: namespace, Name: name}}
				cached, exists, err := deploymentStore.Get(example)
				if err == nil && exists {
					return cached.(*kapi.ReplicationController), nil
				}
				// Double-check with the master for cache misses/errors, since those
				// are rare and API calls are expensive but more reliable.
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			updateDeploymentFunc: func(namespace string, deployment *kapi.ReplicationController) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Update(deployment)
			},
			listDeploymentsForConfigFunc: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
				return factory.KubeClient.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
			},
		},
		deployerPodsFor: func(namespace, name string) (*kapi.PodList, error) {
			return factory.KubeClient.Pods(namespace).List(deployutil.DeployerPodSelector(name), fields.Everything())
		},
		deletePod: func(namespace, name string) error {
			return factory.KubeClient.Pods(namespace).Delete(name, kapi.NewDeleteOptions(0))
		},
	}

	return &controller.RetryController{
		Queue: podQueue,
		RetryManager: controller.NewQueueRetryManager(
			podQueue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				// infinite retries for a transient error
				if _, isTransient := err.(transientError); isTransient {
					return true
				}
				// allow at most one retry for anything else
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return podController.Handle(pod)
		},
	}
}
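
The retry policy type-asserts on transientError, whose definition is not shown. One plausible shape is a distinct error type used to tag failures that deserve unlimited retries; the controller would return transientError("...") from Handle for conditions it expects to resolve on their own:

// transientError tags failures that should be retried without limit
// (hypothetical definition; only the type assertion above is visible).
type transientError string

func (e transientError) Error() string { return string(e) }
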
Example #7
func (o RetryOptions) Run() error {
	allErrs := []error{}
	mapping, err := o.Mapper.RESTMapping(kapi.Kind("ReplicationController"))
	if err != nil {
		return err
	}
	for _, info := range o.Infos {
		config, ok := info.Object.(*deployapi.DeploymentConfig)
		if !ok {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("expected deployment configuration, got %T", info.Object)))
			continue
		}
		if config.Spec.Paused {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to retry paused deployment config %q", config.Name)))
			continue
		}
		if config.Status.LatestVersion == 0 {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("no rollouts found for %q", config.Name)))
			continue
		}

		latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
		rc, err := o.Clientset.ReplicationControllers(config.Namespace).Get(latestDeploymentName)
		if err != nil {
			if kerrors.IsNotFound(err) {
				allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to find the latest rollout (#%d).\nYou can start a new rollout with 'oc rollout latest dc/%s'.", config.Status.LatestVersion, config.Name)))
				continue
			}
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to fetch replication controller %q", config.Name)))
			continue
		}

		if !deployutil.IsFailedDeployment(rc) {
			message := fmt.Sprintf("rollout #%d is %s; only failed deployments can be retried.\n", config.Status.LatestVersion, strings.ToLower(string(deployutil.DeploymentStatusFor(rc))))
			if deployutil.IsCompleteDeployment(rc) {
				message += fmt.Sprintf("You can start a new deployment with 'oc rollout latest dc/%s'.", config.Name)
			} else {
				message += fmt.Sprintf("Optionally, you can cancel this deployment with 'oc rollout cancel dc/%s'.", config.Name)
			}
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, errors.New(message)))
			continue
		}

		// Delete the deployer pod as well as the deployment hook pods, if any
		pods, err := o.Clientset.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(latestDeploymentName)})
		if err != nil {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("failed to list deployer/hook pods for deployment #%d: %v", config.Status.LatestVersion, err)))
			continue
		}
		hasError := false
		for _, pod := range pods.Items {
			err := o.Clientset.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
			if err != nil {
				allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.Status.LatestVersion, err)))
				hasError = true
			}
		}
		if hasError {
			continue
		}

		patches := set.CalculatePatches([]*resource.Info{{Object: rc, Mapping: mapping}}, o.Encoder, func(*resource.Info) (bool, error) {
			rc.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
			delete(rc.Annotations, deployapi.DeploymentStatusReasonAnnotation)
			delete(rc.Annotations, deployapi.DeploymentCancelledAnnotation)
			return true, nil
		})

		if len(patches) == 0 {
			kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "already retried")
			continue
		}

		if _, err := o.Clientset.ReplicationControllers(rc.Namespace).Patch(rc.Name, kapi.StrategicMergePatchType, patches[0].Patch); err != nil {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, err))
			continue
		}
		kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, fmt.Sprintf("retried rollout #%d", config.Status.LatestVersion))
	}

	return utilerrors.NewAggregate(allErrs)
}
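
LatestDeploymentNameForConfig pairs a config with its newest replication controller. A plausible naming scheme, consistent with the rollout numbers in the messages above but an assumption nonetheless, appends the latest version to the config name:

// LatestDeploymentNameForConfig returns the name of the config's most
// recent deployment (RC). The <name>-<version> scheme is an assumption.
func LatestDeploymentNameForConfig(config *deployapi.DeploymentConfig) string {
	return fmt.Sprintf("%s-%d", config.Name, config.Status.LatestVersion)
}
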
Example #8
// Stop scales a replication controller via its deployment configuration down to
// zero replicas, waits for all of them to get deleted and then deletes both the
// replication controller and its deployment configuration.
func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.Duration, gracePeriod *kapi.DeleteOptions) error {
	// If the config is already deleted, it may still have associated
	// deployments which didn't get cleaned up during prior calls to Stop. If
	// the config can't be found, still make an attempt to clean up the
	// deployments.
	//
	// It's important to delete the config first to avoid an undesirable side
	// effect which can cause the deployment to be re-triggered upon the
	// config's deletion. See https://github.com/openshift/origin/issues/2721
	// for more details.
	err := reaper.oc.DeploymentConfigs(namespace).Delete(name)
	configNotFound := kerrors.IsNotFound(err)
	if err != nil && !configNotFound {
		return err
	}

	// Clean up deployments related to the config.
	options := kapi.ListOptions{LabelSelector: util.ConfigSelector(name)}
	rcList, err := reaper.kc.ReplicationControllers(namespace).List(options)
	if err != nil {
		return err
	}
	rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc)
	if err != nil {
		return err
	}

	// If there is neither a config nor any deployments, nor any deployer pods, we can return NotFound.
	deployments := rcList.Items

	if configNotFound && len(deployments) == 0 {
		return kerrors.NewNotFound(kapi.Resource("deploymentconfig"), name)
	}
	for _, rc := range deployments {
		if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
			// Better not error out here...
			glog.Infof("Cannot delete ReplicationController %s/%s: %v", rc.Namespace, rc.Name, err)
		}

		// Only remove deployer pods when the deployment failed. For completed
		// deployments the pods should already be deleted.
		if !util.IsFailedDeployment(&rc) {
			continue
		}

		// Delete all deployer and hook pods
		options = kapi.ListOptions{LabelSelector: util.DeployerPodSelector(rc.Name)}
		podList, err := reaper.kc.Pods(rc.Namespace).List(options)
		if err != nil {
			return err
		}
		for _, pod := range podList.Items {
			err := reaper.kc.Pods(pod.Namespace).Delete(pod.Name, gracePeriod)
			if err != nil {
				// Better not error out here...
				glog.Infof("Cannot delete Pod %s/%s: %v", pod.Namespace, pod.Name, err)
			}
		}
	}

	return nil
}
Example #9
func NewCmdPruneDeployments(f *clientcmd.Factory, parentName, name string, out io.Writer) *cobra.Command {
	cfg := &pruneDeploymentConfig{
		Confirm:         false,
		KeepYoungerThan: 60 * time.Minute,
		KeepComplete:    5,
		KeepFailed:      1,
	}

	cmd := &cobra.Command{
		Use:        name,
		Short:      "Remove old completed and failed deployments",
		Long:       deploymentsLongDesc,
		Example:    fmt.Sprintf(deploymentsExample, parentName, name),
		SuggestFor: []string{"deployment", "deployments"},
		Run: func(cmd *cobra.Command, args []string) {
			if len(args) > 0 {
				glog.Fatalf("No arguments are allowed to this command")
			}

			osClient, kclient, err := f.Clients()
			if err != nil {
				cmdutil.CheckErr(err)
			}

			deploymentConfigList, err := osClient.DeploymentConfigs(kapi.NamespaceAll).List(kapi.ListOptions{})
			if err != nil {
				cmdutil.CheckErr(err)
			}

			deploymentList, err := kclient.ReplicationControllers(kapi.NamespaceAll).List(kapi.ListOptions{})
			if err != nil {
				cmdutil.CheckErr(err)
			}

			deploymentConfigs := []*deployapi.DeploymentConfig{}
			for i := range deploymentConfigList.Items {
				deploymentConfigs = append(deploymentConfigs, &deploymentConfigList.Items[i])
			}

			deployments := []*kapi.ReplicationController{}
			for i := range deploymentList.Items {
				deployments = append(deployments, &deploymentList.Items[i])
			}

			var deploymentPruneFunc prune.PruneFunc

			w := tabwriter.NewWriter(out, 10, 4, 3, ' ', 0)
			defer w.Flush()

			describingPruneDeploymentFunc := func(deployment *kapi.ReplicationController) error {
				fmt.Fprintf(w, "%s\t%s\n", deployment.Namespace, deployment.Name)
				return nil
			}

			switch cfg.Confirm {
			case true:
				deploymentPruneFunc = func(deployment *kapi.ReplicationController) error {
					describingPruneDeploymentFunc(deployment)
					// If the deployment failed, we need to remove its deployer pods, too.
					if deployutil.DeploymentStatusFor(deployment) == deployapi.DeploymentStatusFailed {
						dpSelector := deployutil.DeployerPodSelector(deployment.Name)
						deployers, err := kclient.Pods(deployment.Namespace).List(kapi.ListOptions{LabelSelector: dpSelector})
						if err != nil {
							fmt.Fprintf(os.Stderr, "Cannot list deployer pods for %q: %v\n", deployment.Name, err)
						} else {
							for _, pod := range deployers.Items {
								if err := kclient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
									fmt.Fprintf(os.Stderr, "Cannot remove deployer pod %q: %v\n", pod.Name, err)
								}
							}
						}
					}
					return kclient.ReplicationControllers(deployment.Namespace).Delete(deployment.Name)
				}
			default:
				fmt.Fprintln(os.Stderr, "Dry run enabled - no modifications will be made. Add --confirm to remove deployments")
				deploymentPruneFunc = describingPruneDeploymentFunc
			}

			fmt.Fprintln(w, "NAMESPACE\tNAME")
			pruneTask := prune.NewPruneTasker(deploymentConfigs, deployments, cfg.KeepYoungerThan, cfg.Orphans, cfg.KeepComplete, cfg.KeepFailed, deploymentPruneFunc)
			err = pruneTask.PruneTask()
			if err != nil {
				cmdutil.CheckErr(err)
			}
		},
	}

	cmd.Flags().BoolVar(&cfg.Confirm, "confirm", cfg.Confirm, "Specify that deployment pruning should proceed. Defaults to false, displaying what would be deleted but not actually deleting anything.")
	cmd.Flags().BoolVar(&cfg.Orphans, "orphans", cfg.Orphans, "Prune all deployments where the associated DeploymentConfig no longer exists, the status is complete or failed, and the replica size is 0.")
	cmd.Flags().DurationVar(&cfg.KeepYoungerThan, "keep-younger-than", cfg.KeepYoungerThan, "Specify the minimum age of a deployment for it to be considered a candidate for pruning.")
	cmd.Flags().IntVar(&cfg.KeepComplete, "keep-complete", cfg.KeepComplete, "Per DeploymentConfig, specify the number of deployments whose status is complete that will be preserved whose replica size is 0.")
	cmd.Flags().IntVar(&cfg.KeepFailed, "keep-failed", cfg.KeepFailed, "Per DeploymentConfig, specify the number of deployments whose status is failed that will be preserved whose replica size is 0.")

	return cmd
}
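
The pruneDeploymentConfig struct is not shown, but all of its fields can be recovered from the defaults and flag bindings above:

// pruneDeploymentConfig carries the prune command's options, reconstructed
// from the fields the command reads and binds.
type pruneDeploymentConfig struct {
	Confirm         bool          // actually delete instead of doing a dry run
	Orphans         bool          // also prune deployments whose config is gone
	KeepYoungerThan time.Duration // minimum age before a deployment is a candidate
	KeepComplete    int           // completed deployments to keep per config
	KeepFailed      int           // failed deployments to keep per config
}
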
Example #10
// Handle syncs pod's status with any associated deployment.
func (c *DeployerPodController) Handle(pod *kapi.Pod) error {
	// Find the deployment associated with the deployer pod.
	deploymentName := deployutil.DeploymentNameFor(pod)
	if len(deploymentName) == 0 {
		return nil
	}
	// Reject updates to anything but the main deployer pod
	// TODO: Find a way to filter this on the watch side.
	if pod.Name != deployutil.DeployerPodNameForDeployment(deploymentName) {
		return nil
	}

	deployment := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: pod.Namespace, Name: deploymentName}}
	cached, exists, err := c.store.Get(deployment)
	if err == nil && exists {
		// Try to use the cache first. Trust hits and return them.
		deployment = cached.(*kapi.ReplicationController)
	} else {
		// Double-check with the master for cache misses/errors, since those
		// are rare and API calls are expensive but more reliable.
		deployment, err = c.kClient.ReplicationControllers(pod.Namespace).Get(deploymentName)
	}

	// If the deployment for this pod has disappeared, we should clean up this
	// and any other deployer pods, then bail out.
	if err != nil {
		// Some retrieval error occurred. Retry.
		if !kerrors.IsNotFound(err) {
			return fmt.Errorf("couldn't get deployment %s/%s which owns deployer pod %s/%s", pod.Namespace, deploymentName, pod.Name, pod.Namespace)
		}
		// Find all the deployer pods for the deployment (including this one).
		opts := kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)}
		deployers, err := c.kClient.Pods(pod.Namespace).List(opts)
		if err != nil {
			// Retry.
			return fmt.Errorf("couldn't get deployer pods for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// Delete all deployers.
		for _, deployer := range deployers.Items {
			err := c.kClient.Pods(deployer.Namespace).Delete(deployer.Name, kapi.NewDeleteOptions(0))
			if err != nil {
				if !kerrors.IsNotFound(err) {
					// TODO: Should this fire an event?
					glog.V(2).Infof("Couldn't delete orphaned deployer pod %s/%s: %v", deployer.Namespace, deployer.Name, err)
				}
			} else {
				// TODO: Should this fire an event?
				glog.V(2).Infof("Deleted orphaned deployer pod %s/%s", deployer.Namespace, deployer.Name)
			}
		}
		return nil
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch pod.Status.Phase {
	case kapi.PodRunning:
		nextStatus = deployapi.DeploymentStatusRunning

	case kapi.PodSucceeded:
		nextStatus = deployapi.DeploymentStatusComplete

		config, decodeErr := c.decodeConfig(deployment)
		// If the deployment was cancelled just prior to the deployer pod succeeding
		// then we need to remove the cancel annotations from the complete deployment
		// and emit an event letting users know their cancellation failed.
		if deployutil.IsDeploymentCancelled(deployment) {
			delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
			delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)

			if decodeErr == nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCancellation", "Deployment %q succeeded before cancel recorded", deployutil.LabelForDeployment(deployment))
			} else {
				c.recorder.Event(deployment, kapi.EventTypeWarning, "FailedCancellation", "Succeeded before cancel recorded")
			}
		}
		// Sync the internal replica annotation with the target so that we can
		// distinguish deployer updates from other scaling events.
		deployment.Annotations[deployapi.DeploymentReplicasAnnotation] = deployment.Annotations[deployapi.DesiredReplicasAnnotation]
		if nextStatus == deployapi.DeploymentStatusComplete {
			delete(deployment.Annotations, deployapi.DesiredReplicasAnnotation)
		}

		// reset the size of any test container, since we are the ones updating the RC
		if decodeErr == nil && config.Spec.Test {
			deployment.Spec.Replicas = 0
		}
	case kapi.PodFailed:
		nextStatus = deployapi.DeploymentStatusFailed

		// reset the size of any test container, since we are the ones updating the RC
		if config, err := c.decodeConfig(deployment); err == nil && config.Spec.Test {
			deployment.Spec.Replicas = 0
		}
	}

	if deployutil.CanTransitionPhase(currentStatus, nextStatus) {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.kClient.ReplicationControllers(deployment.Namespace).Update(deployment); err != nil {
			if kerrors.IsNotFound(err) {
				return nil
			}
			return fmt.Errorf("couldn't update Deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s (scale: %d)", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus, deployment.Spec.Replicas)

		// If the deployment was canceled, trigger a reconciliation of its deployment config
		// so that the latest complete deployment can immediately rollback in place of the
		// canceled deployment.
		if nextStatus == deployapi.DeploymentStatusFailed && deployutil.IsDeploymentCancelled(deployment) {
			// If we are unable to get the deployment config, then the deploymentconfig controller will
			// perform its duties once the resync interval forces the deploymentconfig to be reconciled.
			name := deployutil.DeploymentConfigNameFor(deployment)
			kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
				config, err := c.client.DeploymentConfigs(deployment.Namespace).Get(name)
				if err != nil {
					return err
				}
				if config.Annotations == nil {
					config.Annotations = make(map[string]string)
				}
				config.Annotations[deployapi.DeploymentCancelledAnnotation] = strconv.Itoa(config.Status.LatestVersion)
				_, err = c.client.DeploymentConfigs(config.Namespace).Update(config)
				return err
			})
		}
	}

	return nil
}
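
CanTransitionPhase guards the status update above. A plausible implementation is a whitelist of forward-only phase transitions; this is a sketch, and the exact transition table (including the Pending phase, which does not appear in these snippets) is an assumption:

// validTransitions maps each phase to the phases it may legally move to
// (hypothetical table assuming only forward progress is allowed).
var validTransitions = map[deployapi.DeploymentStatus][]deployapi.DeploymentStatus{
	deployapi.DeploymentStatusNew:     {deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusComplete, deployapi.DeploymentStatusFailed},
	deployapi.DeploymentStatusPending: {deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusComplete, deployapi.DeploymentStatusFailed},
	deployapi.DeploymentStatusRunning: {deployapi.DeploymentStatusComplete, deployapi.DeploymentStatusFailed},
}

// CanTransitionPhase reports whether a deployment may move from current to next.
func CanTransitionPhase(current, next deployapi.DeploymentStatus) bool {
	for _, allowed := range validTransitions[current] {
		if allowed == next {
			return true
		}
	}
	return false
}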