Example #1
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (o DeployOptions) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}
	// TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently,
	// that's the case because clients (oc, trigger controllers) are updating the status directly.
	// Clients should be acting either on spec or on annotations and status updates should be a
	// responsibility of the main controller. We need to start by unplugging this assumption from
	// our client tools.
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err == nil {
		// Reject attempts to start a concurrent deployment.
		status := deployutil.DeploymentStatusFor(deployment)
		if status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {
			return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.Status.LatestVersion, status)
		}
	} else {
		if !kerrors.IsNotFound(err) {
			return err
		}
	}

	config.Status.LatestVersion++
	dc, err := o.osClient.DeploymentConfigs(config.Namespace).Update(config)
	if err != nil {
		return err
	}
	fmt.Fprintf(out, "Started deployment #%d\n", dc.Status.LatestVersion)
	fmt.Fprintf(out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, dc.Name)
	return nil
}
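Every example in this collection resolves the latest deployment's replication controller through deployutil.LatestDeploymentNameForConfig. The following is a minimal sketch of that helper, assuming the usual OpenShift convention that a deployment is named <config name>-<latest version>; the real implementation may differ in detail.

// LatestDeploymentNameForConfig (sketch): derive the name of the replication
// controller that backs the latest deployment of the given config.
func LatestDeploymentNameForConfig(config *deployapi.DeploymentConfig) string {
	return fmt.Sprintf("%s-%d", config.Name, config.Status.LatestVersion)
}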
Example #2
func (c *DeploymentConfigController) calculateStatus(config deployapi.DeploymentConfig, deployments []kapi.ReplicationController) (deployapi.DeploymentConfigStatus, error) {
	selector := labels.Set(config.Spec.Selector).AsSelector()
	pods, err := c.podStore.Pods(config.Namespace).List(selector)
	if err != nil {
		return config.Status, err
	}
	available := deployutil.GetAvailablePods(pods, config.Spec.MinReadySeconds)

	// UpdatedReplicas represents the replicas that use the deployment config template, which means
	// we should report the replicas of the latest deployment and not the active one.
	latestReplicas := int32(0)
	for _, deployment := range deployments {
		if deployment.Name == deployutil.LatestDeploymentNameForConfig(&config) {
			updatedDeployment := []kapi.ReplicationController{deployment}
			latestReplicas = deployutil.GetStatusReplicaCountForDeployments(updatedDeployment)
			break
		}
	}

	total := deployutil.GetReplicaCountForDeployments(deployments)

	return deployapi.DeploymentConfigStatus{
		LatestVersion:       config.Status.LatestVersion,
		Details:             config.Status.Details,
		ObservedGeneration:  config.Generation,
		Replicas:            deployutil.GetStatusReplicaCountForDeployments(deployments),
		UpdatedReplicas:     latestReplicas,
		AvailableReplicas:   available,
		UnavailableReplicas: total - available,
	}, nil
}
Example #3
// Scale updates the DeploymentConfig with the provided namespace/name, to a
// new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its
// deployment replica count to reach the new value (if wait is not nil).
func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
	if preconditions == nil {
		preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return err
		}
		rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc))
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, controllerHasSpecifiedReplicas(scaler.clientInterface, rc, dc.Template.ControllerTemplate.Replicas))
	}
	return nil
}
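A hedged usage sketch of the Scale method above. The namespace, name, and sizes are hypothetical; the RetryParams and precondition values mirror the defaults the method itself falls back to.

// scaleFrontend (hypothetical caller): scale dc/frontend in "myproject" to 3
// replicas, retrying the update for up to a minute and then waiting up to five
// minutes for the deployment's replica count to settle.
func scaleFrontend(scaler *DeploymentConfigScaler) error {
	retry := &kubectl.RetryParams{Interval: time.Second, Timeout: time.Minute}
	waitForReplicas := &kubectl.RetryParams{Interval: time.Second, Timeout: 5 * time.Minute}
	return scaler.Scale("myproject", "frontend", 3, nil, retry, waitForReplicas)
}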
Example #4
// GetReplicationController returns the most recent replication controller associated with the deploymentConfig
// with the provided namespace/name combination
func (c *realScalerClient) GetReplicationController(namespace, name string) (*kapi.ReplicationController, error) {
	dc, err := c.oc.DeploymentConfigs(namespace).Get(name)
	if err != nil {
		return nil, err
	}
	return c.kc.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc))
}
Example #5
// retry resets the status of the latest deployment to New, which will cause
// the deployment to be retried. An error is returned if the deployment is not
// currently in a failed state.
func (o DeployOptions) retry(config *deployapi.DeploymentConfig) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot retry a paused deployment config")
	}
	if config.Status.LatestVersion == 0 {
		return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name)
	}
	// TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently,
	// that's the case because clients (oc, trigger controllers) are updating the status directly.
	// Clients should be acting either on spec or on annotations and status updates should be a
	// responsibility of the main controller. We need to start by unplugging this assumption from
	// our client tools.
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("unable to find the latest deployment (#%d).\nYou can start a new deployment with 'oc deploy --latest dc/%s'.", config.Status.LatestVersion, config.Name)
		}
		return err
	}

	if !deployutil.IsFailedDeployment(deployment) {
		message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.Status.LatestVersion, deployutil.DeploymentStatusFor(deployment))
		if deployutil.IsCompleteDeployment(deployment) {
			message += fmt.Sprintf("You can start a new deployment with 'oc deploy --latest dc/%s'.", config.Name)
		} else {
			message += fmt.Sprintf("Optionally, you can cancel this deployment with 'oc deploy --cancel dc/%s'.", config.Name)
		}

		return fmt.Errorf(message)
	}

	// Delete the deployer pod as well as the deployment hooks pods, if any
	pods, err := o.kubeClient.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)})
	if err != nil {
		return fmt.Errorf("failed to list deployer/hook pods for deployment #%d: %v", config.Status.LatestVersion, err)
	}
	for _, pod := range pods.Items {
		err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
		if err != nil {
			return fmt.Errorf("failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.Status.LatestVersion, err)
		}
	}

	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	// clear out the cancellation flag as well as any previous status-reason annotation
	delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
	_, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
	if err != nil {
		return err
	}
	fmt.Fprintf(o.out, "Retried #%d\n", config.Status.LatestVersion)
	if o.follow {
		return o.getLogs(config)
	}
	fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, config.Name)
	return nil
}
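The retry path above relies on deployutil's status helpers, which read the deployment status annotation stored on the replication controller. A rough sketch, assuming the status string is kept verbatim under deployapi.DeploymentStatusAnnotation; the real helpers may be more general.

// DeploymentStatusFor (sketch): read the deployment status annotation from the
// replication controller that represents a deployment.
func DeploymentStatusFor(deployment *kapi.ReplicationController) deployapi.DeploymentStatus {
	return deployapi.DeploymentStatus(deployment.Annotations[deployapi.DeploymentStatusAnnotation])
}

// IsFailedDeployment (sketch): a deployment can be retried only in this state.
func IsFailedDeployment(deployment *kapi.ReplicationController) bool {
	return DeploymentStatusFor(deployment) == deployapi.DeploymentStatusFailed
}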
Example #6
// Handle processes change triggers for config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	if !deployutil.HasChangeTrigger(config) {
		glog.V(5).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	if config.Status.LatestVersion == 0 {
		_, _, abort, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			glog.V(4).Infof("Couldn't create initial deployment for deploymentConfig %q: %v", deployutil.LabelForDeploymentConfig(config), err)
			return nil
		}
		if !abort {
			glog.V(4).Infof("Created initial deployment for deploymentConfig %q", deployutil.LabelForDeploymentConfig(config))
		}
		return nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(5).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}

	// Detect template diffs, and return early if there aren't any changes.
	if kapi.Semantic.DeepEqual(config.Spec.Template, deployedConfig.Spec.Template) {
		glog.V(5).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.Status.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}

	// There was a template diff, so generate a new config version.
	fromVersion, toVersion, abort, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	if !abort {
		glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	}
	return nil
}
Example #7
// Describe returns the description of a DeploymentConfig
func (d *DeploymentConfigDescriber) Describe(namespace, name string) (string, error) {
	deploymentConfig, err := d.client.getDeploymentConfig(namespace, name)
	if err != nil {
		return "", err
	}
	events, err := d.client.listEvents(deploymentConfig)
	if err != nil {
		return "", err
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, deploymentConfig.ObjectMeta)

		if deploymentConfig.Status.LatestVersion == 0 {
			formatString(out, "Latest Version", "Not deployed")
		} else {
			formatString(out, "Latest Version", strconv.Itoa(deploymentConfig.Status.LatestVersion))
		}

		printTriggers(deploymentConfig.Spec.Triggers, out)

		formatString(out, "Strategy", deploymentConfig.Spec.Strategy.Type)
		printStrategy(deploymentConfig.Spec.Strategy, out)
		printDeploymentConfigSpec(deploymentConfig.Spec, out)
		if deploymentConfig.Status.Details != nil && len(deploymentConfig.Status.Details.Message) > 0 {
			fmt.Fprintf(out, "Warning:\t%s\n", deploymentConfig.Status.Details.Message)
		}
		deploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig)
		deployment, err := d.client.getDeployment(namespace, deploymentName)
		if err != nil {
			if kerrors.IsNotFound(err) {
				formatString(out, "Latest Deployment", "<none>")
			} else {
				formatString(out, "Latest Deployment", fmt.Sprintf("error: %v", err))
			}
		} else {
			header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment))
			printDeploymentRc(deployment, d.client, out, header, true)
		}
		deploymentsHistory, err := d.client.listDeployments(namespace, labels.Everything())
		if err == nil {
			sorted := rcSorter{}
			sorted = append(sorted, deploymentsHistory.Items...)
			sort.Sort(sorted)
			for _, item := range sorted {
				if item.Name != deploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) {
					header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item))
					printDeploymentRc(&item, d.client, out, header, false)
				}
			}
		}

		if events != nil {
			kctl.DescribeEvents(events, out)
		}
		return nil
	})
}
Example #8
func (o RolloutLatestOptions) RunRolloutLatest() error {
	info := o.infos[0]
	config, ok := info.Object.(*deployapi.DeploymentConfig)
	if !ok {
		return fmt.Errorf("%s is not a deployment config", info.Name)
	}

	// TODO: Consider allowing one-off deployments for paused configs
	// See https://github.com/openshift/origin/issues/9903
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}

	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kc.ReplicationControllers(config.Namespace).Get(deploymentName)
	switch {
	case err == nil:
		// Reject attempts to start a concurrent deployment.
		if !deployutil.IsTerminatedDeployment(deployment) {
			status := deployutil.DeploymentStatusFor(deployment)
			return fmt.Errorf("#%d is already in progress (%s).", config.Status.LatestVersion, status)
		}
	case !kerrors.IsNotFound(err):
		return err
	}

	dc := config
	if !o.DryRun {
		request := &deployapi.DeploymentRequest{
			Name:   config.Name,
			Latest: !o.again,
			Force:  true,
		}

		dc, err = o.oc.DeploymentConfigs(config.Namespace).Instantiate(request)

		// Pre 1.4 servers don't support the instantiate endpoint. Fallback to incrementing
		// latestVersion on them.
		if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
			config.Status.LatestVersion++
			dc, err = o.oc.DeploymentConfigs(config.Namespace).Update(config)
		}

		if err != nil {
			return err
		}

		info.Refresh(dc, true)
	}

	if o.output == "revision" {
		fmt.Fprintf(o.out, "%d", dc.Status.LatestVersion)
		return nil
	}

	kcmdutil.PrintSuccess(o.mapper, o.output == "name", o.out, info.Mapping.Resource, info.Name, o.DryRun, "rolled out")
	return nil
}
Example #9
// decodeFromLatest will try to return the decoded version of the current deploymentconfig found
// in the annotations of its latest deployment. If there is no previous deploymentconfig (i.e.
// latestVersion == 0), the returned deploymentconfig will be the same.
func (c *DeploymentConfigChangeController) decodeFromLatest(config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
	if config.Status.LatestVersion == 0 {
		return config, nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.kClient.ReplicationControllers(config.Namespace).Get(latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		return nil, fmt.Errorf("couldn't retrieve deployment for deployment config %q: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	return c.decodeConfig(deployment)
}
Example #10
func (c *DeploymentTriggerController) updateDeploymentConfig(old, cur interface{}) {
	newDc := cur.(*deployapi.DeploymentConfig)
	oldDc := old.(*deployapi.DeploymentConfig)

	// A periodic relist will send update events for all known deployment configs.
	if newDc.ResourceVersion == oldDc.ResourceVersion {
		return
	}
	// No need to enqueue deployment configs that have no triggers or are paused.
	if len(newDc.Spec.Triggers) == 0 || newDc.Spec.Paused {
		return
	}
	// We don't want to compete with the main deployment config controller. Let's process this
	// config once it's synced. Note that this does not eliminate conflicts between the two
	// controllers because the main controller is constantly updating deployment configs as
	// owning replication controllers and pods are updated.
	if !deployutil.HasSynced(newDc, newDc.Generation) {
		return
	}
	// Enqueue the deployment config if it hasn't been deployed yet.
	if newDc.Status.LatestVersion == 0 {
		c.enqueueDeploymentConfig(newDc)
		return
	}
	// Compare deployment config templates before enqueueing. This reduces the number of times
	// we will try to instantiate a deployment config, at the expense of duplicating some of the
	// work that the instantiate endpoint is already doing, but I think this is fine.
	shouldInstantiate := true
	latestRc, err := c.rcLister.ReplicationControllers(newDc.Namespace).Get(deployutil.LatestDeploymentNameForConfig(newDc))
	if err != nil {
		// If we get an error here it may be due to the rc cache lagging behind. In such a case
		// just defer to the api server (instantiate REST) where we will retry this.
		glog.V(2).Infof("Cannot get latest rc for dc %s:%d (%v) - will defer to instantiate", deployutil.LabelForDeploymentConfig(newDc), newDc.Status.LatestVersion, err)
	} else {
		initial, err := deployutil.DecodeDeploymentConfig(latestRc, c.codec)
		if err != nil {
			glog.V(2).Infof("Cannot decode dc from replication controller %s: %v", deployutil.LabelForDeployment(latestRc), err)
			return
		}
		shouldInstantiate = !reflect.DeepEqual(newDc.Spec.Template, initial.Spec.Template)
	}
	if !shouldInstantiate {
		return
	}

	c.enqueueDeploymentConfig(newDc)
}
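The HasSynced guard above keeps the trigger controller from acting on a config whose status the main controller has not yet observed. A minimal sketch, assuming the usual convention that status.observedGeneration tracks metadata.generation:

// HasSynced (sketch): the config counts as synced once the main controller has
// observed at least the given generation.
func HasSynced(config *deployapi.DeploymentConfig, generation int64) bool {
	return config.Status.ObservedGeneration >= generation
}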
Example #11
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (o DeployOptions) deploy(config *deployapi.DeploymentConfig) error {
	if config.Spec.Paused {
		return fmt.Errorf("cannot deploy a paused deployment config")
	}
	// TODO: This implies that deploymentconfig.status.latestVersion is always synced. Currently,
	// that's the case because clients (oc, trigger controllers) are updating the status directly.
	// Clients should be acting either on spec or on annotations and status updates should be a
	// responsibility of the main controller. We need to start by unplugging this assumption from
	// our client tools.
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err == nil && !deployutil.IsTerminatedDeployment(deployment) {
		// Reject attempts to start a concurrent deployment.
		return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.",
			config.Status.LatestVersion, deployutil.DeploymentStatusFor(deployment))
	}
	if err != nil && !kerrors.IsNotFound(err) {
		return err
	}

	request := &deployapi.DeploymentRequest{
		Name:   config.Name,
		Latest: false,
		Force:  true,
	}

	dc, err := o.osClient.DeploymentConfigs(config.Namespace).Instantiate(request)
	// Pre 1.4 servers don't support the instantiate endpoint. Fallback to incrementing
	// latestVersion on them.
	if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
		config.Status.LatestVersion++
		dc, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config)
	}
	if err != nil {
		if kerrors.IsBadRequest(err) {
			err = fmt.Errorf("%v - try 'oc rollout latest dc/%s'", err, config.Name)
		}
		return err
	}
	fmt.Fprintf(o.out, "Started deployment #%d\n", dc.Status.LatestVersion)
	if o.follow {
		return o.getLogs(dc)
	}
	fmt.Fprintf(o.out, "Use '%s logs -f dc/%s' to track its progress.\n", o.baseCommandName, dc.Name)
	return nil
}
Example #12
// retry resets the status of the latest deployment to New, which will cause
// the deployment to be retried. An error is returned if the deployment is not
// currently in a failed state.
func (o *DeployOptions) retry(config *deployapi.DeploymentConfig, out io.Writer) error {
	if config.LatestVersion == 0 {
		return fmt.Errorf("no deployments found for %s/%s", config.Namespace, config.Name)
	}
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("Unable to find the latest deployment (#%d).\nYou can start a new deployment using the --latest option.", config.LatestVersion)
		}
		return err
	}

	if status := deployutil.DeploymentStatusFor(deployment); status != deployapi.DeploymentStatusFailed {
		message := fmt.Sprintf("#%d is %s; only failed deployments can be retried.\n", config.LatestVersion, status)
		if status == deployapi.DeploymentStatusComplete {
			message += "You can start a new deployment using the --latest option."
		} else {
			message += "Optionally, you can cancel this deployment using the --cancel option."
		}

		return fmt.Errorf(message)
	}

	// Delete the deployer pod as well as the deployment hooks pods, if any
	pods, err := o.kubeClient.Pods(config.Namespace).List(deployutil.DeployerPodSelector(deploymentName), fields.Everything())
	if err != nil {
		return fmt.Errorf("Failed to list deployer/hook pods for deployment #%d: %v", config.LatestVersion, err)
	}
	for _, pod := range pods.Items {
		err := o.kubeClient.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
		if err != nil {
			return fmt.Errorf("Failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.LatestVersion, err)
		}
	}

	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	// clear out the cancellation flag as well as any previous status-reason annotation
	delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
	_, err = o.kubeClient.ReplicationControllers(deployment.Namespace).Update(deployment)
	if err == nil {
		fmt.Fprintf(out, "retried #%d\n", config.LatestVersion)
	}
	return err
}
Example #13
// ApproximatePodTemplateForObject returns a pod template object for the provided source.
// It may return both an error and an object. It attempts to return the best possible template
// available at the current time.
func (w *Factory) ApproximatePodTemplateForObject(object runtime.Object) (*api.PodTemplateSpec, error) {
	switch t := object.(type) {
	case *deployapi.DeploymentConfig:
		fallback := t.Spec.Template

		_, kc, err := w.Clients()
		if err != nil {
			return fallback, err
		}

		latestDeploymentName := deployutil.LatestDeploymentNameForConfig(t)
		deployment, err := kc.ReplicationControllers(t.Namespace).Get(latestDeploymentName)
		if err != nil {
			return fallback, err
		}

		fallback = deployment.Spec.Template

		pods, err := kc.Pods(deployment.Namespace).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector)})
		if err != nil {
			return fallback, err
		}

		for i := range pods.Items {
			pod := &pods.Items[i]
			if fallback == nil || pod.CreationTimestamp.Before(fallback.CreationTimestamp) {
				fallback = &api.PodTemplateSpec{
					ObjectMeta: pod.ObjectMeta,
					Spec:       pod.Spec,
				}
			}
		}
		return fallback, nil

	default:
		pod, err := w.AttachablePodForObject(object)
		if pod != nil {
			return &api.PodTemplateSpec{
				ObjectMeta: pod.ObjectMeta,
				Spec:       pod.Spec,
			}, err
		}
		return nil, err
	}
}
Example #14
// Describe returns the description of the latest deployments for a config
func (d *LatestDeploymentsDescriber) Describe(namespace, name string) (string, error) {
	var f formatter

	config, err := d.client.getDeploymentConfig(namespace, name)
	if err != nil {
		return "", err
	}

	var deployments []kapi.ReplicationController
	if d.count == -1 || d.count > 1 {
		list, err := d.client.listDeployments(namespace, labels.Everything())
		if err != nil && !kerrors.IsNotFound(err) {
			return "", err
		}
		deployments = list.Items
	} else {
		deploymentName := deployutil.LatestDeploymentNameForConfig(config)
		deployment, err := d.client.getDeployment(config.Namespace, deploymentName)
		if err != nil && !kerrors.IsNotFound(err) {
			return "", err
		}
		if deployment != nil {
			deployments = []kapi.ReplicationController{*deployment}
		}
	}

	g := graph.New()
	dcNode := deploygraph.EnsureDeploymentConfigNode(g, config)
	for i := range deployments {
		kubegraph.EnsureReplicationControllerNode(g, &deployments[i])
	}
	deployedges.AddTriggerEdges(g, dcNode)
	deployedges.AddDeploymentEdges(g, dcNode)
	activeDeployment, inactiveDeployments := deployedges.RelevantDeployments(g, dcNode)

	return tabbedString(func(out *tabwriter.Writer) error {
		descriptions := describeDeployments(f, dcNode, activeDeployment, inactiveDeployments, d.count)
		for i, description := range descriptions {
			descriptions[i] = fmt.Sprintf("%v %v", name, description)
		}
		printLines(out, "", 0, descriptions...)
		return nil
	})
}
Example #15
// decodeFromLatestDeployment will try to return the decoded version of the current deploymentconfig
// found in the annotations of its latest deployment. If there is no previous deploymentconfig (i.e.
// latestVersion == 0), the returned deploymentconfig will be the same.
func decodeFromLatestDeployment(config *deployapi.DeploymentConfig, rn kclient.ReplicationControllersNamespacer, decoder runtime.Decoder) (*deployapi.DeploymentConfig, error) {
	if config.Status.LatestVersion == 0 {
		return config, nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := rn.ReplicationControllers(config.Namespace).Get(latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		return nil, err
	}
	decoded, err := deployutil.DecodeDeploymentConfig(deployment, decoder)
	if err != nil {
		return nil, errors.NewInternalError(err)
	}
	return decoded, nil
}
Example #16
// Describe returns the description of the latest deployments for a config
func (d *LatestDeploymentsDescriber) Describe(namespace, name string) (string, error) {
	config, err := d.client.getDeploymentConfig(namespace, name)
	if err != nil {
		return "", err
	}

	var deployments []kapi.ReplicationController
	if d.count == -1 || d.count > 1 {
		list, err := d.client.listDeployments(namespace, labels.Everything())
		if err != nil && !kerrors.IsNotFound(err) {
			return "", err
		}
		deployments = list.Items
	} else {
		deploymentName := deployutil.LatestDeploymentNameForConfig(config)
		deployment, err := d.client.getDeployment(config.Namespace, deploymentName)
		if err != nil && !kerrors.IsNotFound(err) {
			return "", err
		}
		if deployment != nil {
			deployments = []kapi.ReplicationController{*deployment}
		}
	}

	g := graph.New()
	deploy := graph.DeploymentConfig(g, config)
	if len(deployments) > 0 {
		graph.JoinDeployments(deploy.(*graph.DeploymentConfigNode), deployments)
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		node := deploy.(*graph.DeploymentConfigNode)
		descriptions := describeDeployments(node, d.count)
		for i, description := range descriptions {
			descriptions[i] = fmt.Sprintf("%v %v", name, description)
		}
		printLines(out, "", 0, descriptions...)
		return nil
	})
}
Example #17
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (c *deployLatestCommand) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.client.GetDeployment(config.Namespace, deploymentName)
	if err != nil {
		if !kerrors.IsNotFound(err) {
			return err
		}
	} else {
		// Reject attempts to start a concurrent deployment.
		status := deployutil.DeploymentStatusFor(deployment)
		if status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {
			return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.LatestVersion, status)
		}
	}

	config.LatestVersion++
	_, err = c.client.UpdateDeploymentConfig(config)
	if err == nil {
		fmt.Fprintf(out, "Started deployment #%d\n", config.LatestVersion)
	}
	return err
}
Example #18
// deploy launches a new deployment unless there's already a deployment
// process in progress for config.
func (o *DeployOptions) deploy(config *deployapi.DeploymentConfig, out io.Writer) error {
	deploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := o.kubeClient.ReplicationControllers(config.Namespace).Get(deploymentName)
	if err != nil {
		if kerrors.IsNotFound(err) {
			return fmt.Errorf("couldn't find deployment %q", deploymentName)
		}
		return err
	}
	// Reject attempts to start a concurrent deployment.
	status := deployutil.DeploymentStatusFor(deployment)
	if status != deployapi.DeploymentStatusComplete && status != deployapi.DeploymentStatusFailed {
		return fmt.Errorf("#%d is already in progress (%s).\nOptionally, you can cancel this deployment using the --cancel option.", config.LatestVersion, status)
	}

	config.LatestVersion++
	_, err = o.osClient.DeploymentConfigs(config.Namespace).Update(config)
	if err == nil {
		fmt.Fprintf(out, "Started deployment #%d\n", config.LatestVersion)
	}
	return err
}
Example #19
// Scale updates a replication controller created by the DeploymentConfig with the provided namespace/name,
// to a new size, with optional precondition check (if preconditions is not nil), optional retries (if retry
// is not nil), and then optionally waits for its replica count to reach the new value (if wait is not nil).
func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
	if preconditions == nil {
		preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		if scaleErr, ok := err.(kubectl.ControllerScaleError); ok && kerrors.IsNotFound(scaleErr.ActualError) {
			glog.Infof("No deployment found for dc/%s. Scaling the deployment configuration template...", name)
			dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
			if err != nil {
				return err
			}
			dc.Template.ControllerTemplate.Replicas = int(newSize)

			if _, err := scaler.dcClient.DeploymentConfigs(namespace).Update(dc); err != nil {
				return err
			}
			return nil
		}
		return err
	}
	if waitForReplicas != nil {
		dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return err
		}
		rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc))
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, kclient.ControllerHasDesiredReplicas(scaler.clientInterface, rc))
	}
	return nil
}
Example #20
// ScaleSimple does a simple one-shot attempt at scaling - not useful on its own, but
// a necessary building block for Scale
func (scaler *DeploymentConfigScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) error {
	dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
	if err != nil {
		return err
	}
	controller, err := scaler.rcClient.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc))
	if err != nil {
		return kubectl.ControllerScaleError{FailureType: kubectl.ControllerScaleGetFailure, ResourceVersion: "Unknown", ActualError: err}
	}

	if preconditions != nil {
		if err := preconditions.ValidateReplicationController(controller); err != nil {
			return err
		}
	}
	controller.Spec.Replicas = int(newSize)
	// TODO: do retry on 409 errors here?
	if _, err := scaler.rcClient.ReplicationControllers(namespace).Update(controller); err != nil {
		return kubectl.ControllerScaleError{FailureType: kubectl.ControllerScaleUpdateFailure, ResourceVersion: controller.ResourceVersion, ActualError: err}
	}
	// TODO: do a better job of printing objects here.
	return nil
}
Example #21
// NewFactory creates an object that holds common methods across all OpenShift commands
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	var restMapper meta.MultiRESTMapper
	seenGroups := sets.String{}
	for _, gv := range registered.EnabledVersions() {
		if seenGroups.Has(gv.Group) {
			continue
		}
		seenGroups.Insert(gv.Group)

		groupMeta, err := registered.Group(gv.Group)
		if err != nil {
			continue
		}
		restMapper = meta.MultiRESTMapper(append(restMapper, groupMeta.RESTMapper))
	}
	mapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: restMapper}}

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*kclient.Config),
		loader:  clientConfig,
	}

	w := &Factory{
		Factory:               cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig: clientConfig,
		clients:               clients,
	}

	w.Object = func() (meta.RESTMapper, runtime.ObjectTyper) {
		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		if cfg, err := clients.ClientConfigForVersion(nil); err == nil {
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
		}
		return mapper, api.Scheme
	}

	kClientForMapping := w.Factory.ClientForMapping
	w.ClientForMapping = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kClientForMapping(mapping)
	}

	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.GroupVersionKind) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			cfg, err := clients.ClientConfigForVersion(&mappingVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.GroupVersionKind.Kind, err)
			}

			describer, ok := describe.DescriberFor(mapping.GroupVersionKind.GroupKind(), oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		if mapping.GroupVersionKind.GroupKind() == deployapi.Kind("DeploymentConfig") {
			return deployscaler.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.GroupVersionKind.GroupKind() {
		case deployapi.Kind("DeploymentConfig"):
			return deployreaper.NewDeploymentConfigReaper(oc, kc), nil
		case authorizationapi.Kind("Role"):
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case authorizationapi.Kind("ClusterRole"):
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case userapi.Kind("User"):
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case userapi.Kind("Group"):
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case buildapi.Kind("BuildConfig"):
			return buildreaper.NewBuildConfigReaper(oc), nil
		}
		return kReaperFunc(mapping)
	}
	kGenerators := w.Factory.Generators
	w.Generators = func(cmdName string) map[string]kubectl.Generator {
		originGenerators := DefaultGenerators(cmdName)
		kubeGenerators := kGenerators(cmdName)

		ret := map[string]kubectl.Generator{}
		for k, v := range kubeGenerators {
			ret[k] = v
		}
		for k, v := range originGenerators {
			ret[k] = v
		}
		return ret
	}
	kPodSelectorForObjectFunc := w.Factory.PodSelectorForObject
	w.PodSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kPodSelectorForObjectFunc(object)
		}
	}

	kMapBasedSelectorForObjectFunc := w.Factory.MapBasedSelectorForObject
	w.MapBasedSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kMapBasedSelectorForObjectFunc(object)
		}

	}

	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*kclient.Request, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				return nil, errors.New("cannot specify a version and a build")
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			builds, err := oc.Builds(t.Namespace).List(api.ListOptions{})
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigLabelPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %q", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	w.Printer = func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide bool, showAll bool, showLabels, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
		return describe.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, showLabels, absoluteTimestamps, columnLabels), nil
	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kCanBeAutoscaled := w.Factory.CanBeAutoscaled
	w.CanBeAutoscaled = func(kind unversioned.GroupKind) error {
		if kind == deployapi.Kind("DeploymentConfig") {
			return nil
		}
		return kCanBeAutoscaled(kind)
	}
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			var err error
			var pods *api.PodList
			for pods == nil || len(pods.Items) == 0 {
				if t.Status.LatestVersion == 0 {
					time.Sleep(2 * time.Second)
				}
				if t, err = oc.DeploymentConfigs(t.Namespace).Get(t.Name); err != nil {
					return nil, err
				}
				latestDeploymentName := deployutil.LatestDeploymentNameForConfig(t)
				deployment, err := kc.ReplicationControllers(t.Namespace).Get(latestDeploymentName)
				if err != nil {
					if kerrors.IsNotFound(err) {
						continue
					}
					return nil, err
				}
				pods, err = kc.Pods(deployment.Namespace).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector)})
				if err != nil {
					return nil, err
				}
				if len(pods.Items) == 0 {
					time.Sleep(2 * time.Second)
				}
			}
			var oldestPod *api.Pod
			for i := range pods.Items {
				// Take the address of the slice element rather than the range
				// variable, which is reused on every iteration.
				pod := &pods.Items[i]
				if oldestPod == nil || pod.CreationTimestamp.Before(oldestPod.CreationTimestamp) {
					oldestPod = pod
				}
			}
			return oldestPod, nil
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}
	kSwaggerSchemaFunc := w.Factory.SwaggerSchema
	w.Factory.SwaggerSchema = func(gvk unversioned.GroupVersionKind) (*swagger.ApiDeclaration, error) {
		if !latest.OriginKind(gvk) {
			return kSwaggerSchemaFunc(gvk)
		}
		// TODO: we need to register the OpenShift API under the Kube group, and start returning the OpenShift
		// group from the scheme.
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}
		return w.OriginSwaggerSchema(oc.RESTClient, gvk.GroupVersion())
	}

	w.EditorEnvs = func() []string {
		return []string{"OC_EDITOR", "EDITOR"}
	}

	return w
}
Example #22
// ApproximatePodTemplateForObject returns a pod template object for the provided source.
// It may return both an error and an object. It attempts to return the best possible template
// available at the current time.
func (w *Factory) ApproximatePodTemplateForObject(object runtime.Object) (*api.PodTemplateSpec, error) {
	switch t := object.(type) {
	case *imageapi.ImageStreamTag:
		// create a minimal pod spec that uses the image referenced by the istag without any introspection
		// it's possible that we could someday do a better job introspecting it
		return &api.PodTemplateSpec{
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
				Containers: []api.Container{
					{Name: "container-00", Image: t.Image.DockerImageReference},
				},
			},
		}, nil
	case *imageapi.ImageStreamImage:
		// create a minimal pod spec that uses the image referenced by the ImageStreamImage without any introspection
		// it's possible that we could someday do a better job introspecting it
		return &api.PodTemplateSpec{
			Spec: api.PodSpec{
				RestartPolicy: api.RestartPolicyNever,
				Containers: []api.Container{
					{Name: "container-00", Image: t.Image.DockerImageReference},
				},
			},
		}, nil
	case *deployapi.DeploymentConfig:
		fallback := t.Spec.Template

		_, kc, err := w.Clients()
		if err != nil {
			return fallback, err
		}

		latestDeploymentName := deployutil.LatestDeploymentNameForConfig(t)
		deployment, err := kc.ReplicationControllers(t.Namespace).Get(latestDeploymentName)
		if err != nil {
			return fallback, err
		}

		fallback = deployment.Spec.Template

		pods, err := kc.Pods(deployment.Namespace).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(deployment.Spec.Selector)})
		if err != nil {
			return fallback, err
		}

		for i := range pods.Items {
			pod := &pods.Items[i]
			if fallback == nil || pod.CreationTimestamp.Before(fallback.CreationTimestamp) {
				fallback = &api.PodTemplateSpec{
					ObjectMeta: pod.ObjectMeta,
					Spec:       pod.Spec,
				}
			}
		}
		return fallback, nil

	default:
		pod, err := w.AttachablePodForObject(object)
		if pod != nil {
			return &api.PodTemplateSpec{
				ObjectMeta: pod.ObjectMeta,
				Spec:       pod.Spec,
			}, err
		}
		switch t := object.(type) {
		case *api.ReplicationController:
			return t.Spec.Template, err
		case *extensions.ReplicaSet:
			return &t.Spec.Template, err
		case *extensions.DaemonSet:
			return &t.Spec.Template, err
		case *batch.Job:
			return &t.Spec.Template, err
		}
		return nil, err
	}
}
Example #23
// Handle processes change triggers for config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	hasChangeTrigger := false
	for _, trigger := range config.Triggers {
		if trigger.Type == deployapi.DeploymentTriggerOnConfigChange {
			hasChangeTrigger = true
			break
		}
	}

	if !hasChangeTrigger {
		glog.V(4).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	if config.LatestVersion == 0 {
		_, _, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			// TODO: This needs to be handled by setting some state within the API so
			// users know why the trigger isn't doing anything.
			// https://github.com/openshift/origin/issues/3526
			return nil
		}
		glog.V(4).Infof("Created initial Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}

	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(2).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}

	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}

	newSpec, oldSpec := config.Template.ControllerTemplate.Template.Spec, deployedConfig.Template.ControllerTemplate.Template.Spec
	if kapi.Semantic.DeepEqual(oldSpec, newSpec) {
		glog.V(2).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}

	fromVersion, toVersion, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	return nil
}
Example #24
// NewFactory creates an object that holds common methods across all OpenShift commands
func NewFactory(clientConfig kclientcmd.ClientConfig) *Factory {
	mapper := ShortcutExpander{RESTMapper: kubectl.ShortcutExpander{RESTMapper: latest.RESTMapper}}

	clients := &clientCache{
		clients: make(map[string]*client.Client),
		configs: make(map[string]*kclient.Config),
		loader:  clientConfig,
	}

	generators := map[string]kubectl.Generator{
		"route/v1":          routegen.RouteGenerator{},
		"run/v1":            deploygen.BasicDeploymentConfigController{},
		"run-controller/v1": kubectl.BasicReplicationController{},
	}

	w := &Factory{
		Factory:               cmdutil.NewFactory(clientConfig),
		OpenShiftClientConfig: clientConfig,
		clients:               clients,
	}

	w.Object = func() (meta.RESTMapper, runtime.ObjectTyper) {
		// Output using whatever version was negotiated in the client cache. The
		// version we decode with may not be the same as what the server requires.
		if cfg, err := clients.ClientConfigForVersion(""); err == nil {
			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersion: cfg.Version}, api.Scheme
		}
		return mapper, api.Scheme
	}

	kRESTClient := w.Factory.RESTClient
	w.RESTClient = func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
		if latest.OriginKind(mapping.Kind, mapping.APIVersion) {
			client, err := clients.ClientForVersion(mapping.APIVersion)
			if err != nil {
				return nil, err
			}
			return client.RESTClient, nil
		}
		return kRESTClient(mapping)
	}

	// Save original Describer function
	kDescriberFunc := w.Factory.Describer
	w.Describer = func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
		if latest.OriginKind(mapping.Kind, mapping.APIVersion) {
			oClient, kClient, err := w.Clients()
			if err != nil {
				return nil, fmt.Errorf("unable to create client %s: %v", mapping.Kind, err)
			}

			cfg, err := clients.ClientConfigForVersion(mapping.APIVersion)
			if err != nil {
				return nil, fmt.Errorf("unable to load a client %s: %v", mapping.Kind, err)
			}

			describer, ok := describe.DescriberFor(mapping.Kind, oClient, kClient, cfg.Host)
			if !ok {
				return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind)
			}
			return describer, nil
		}
		return kDescriberFunc(mapping)
	}
	kScalerFunc := w.Factory.Scaler
	w.Scaler = func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		if mapping.Kind == "DeploymentConfig" {
			return deployscaler.NewDeploymentConfigScaler(oc, kc), nil
		}
		return kScalerFunc(mapping)
	}
	kReaperFunc := w.Factory.Reaper
	w.Reaper = func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch mapping.Kind {
		case "DeploymentConfig":
			return deployreaper.NewDeploymentConfigReaper(oc, kc), nil
		case "Role":
			return authorizationreaper.NewRoleReaper(oc, oc), nil
		case "ClusterRole":
			return authorizationreaper.NewClusterRoleReaper(oc, oc, oc), nil
		case "User":
			return authenticationreaper.NewUserReaper(
				client.UsersInterface(oc),
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		case "Group":
			return authenticationreaper.NewGroupReaper(
				client.GroupsInterface(oc),
				client.ClusterRoleBindingsInterface(oc),
				client.RoleBindingsNamespacer(oc),
				kclient.SecurityContextConstraintsInterface(kc),
			), nil
		}
		return kReaperFunc(mapping)
	}
	kGeneratorFunc := w.Factory.Generator
	w.Generator = func(name string) (kubectl.Generator, bool) {
		if generator, ok := generators[name]; ok {
			return generator, true
		}
		return kGeneratorFunc(name)
	}
	kPodSelectorForObjectFunc := w.Factory.PodSelectorForObject
	w.PodSelectorForObject = func(object runtime.Object) (string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return kubectl.MakeLabels(t.Spec.Selector), nil
		default:
			return kPodSelectorForObjectFunc(object)
		}
	}
	kPortsForObjectFunc := w.Factory.PortsForObject
	w.PortsForObject = func(object runtime.Object) ([]string, error) {
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			return getPorts(t.Spec.Template.Spec), nil
		default:
			return kPortsForObjectFunc(object)
		}
	}
	kLogsForObjectFunc := w.Factory.LogsForObject
	w.LogsForObject = func(object, options runtime.Object) (*kclient.Request, error) {
		oc, _, err := w.Clients()
		if err != nil {
			return nil, err
		}

		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			dopts, ok := options.(*deployapi.DeploymentLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a DeploymentLogOptions")
			}
			return oc.DeploymentLogs(t.Namespace).Get(t.Name, *dopts), nil
		case *buildapi.Build:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			if bopts.Version != nil {
				// should --version work with builds at all?
				return nil, errors.New("cannot specify a version and a build")
			}
			return oc.BuildLogs(t.Namespace).Get(t.Name, *bopts), nil
		case *buildapi.BuildConfig:
			bopts, ok := options.(*buildapi.BuildLogOptions)
			if !ok {
				return nil, errors.New("provided options object is not a BuildLogOptions")
			}
			builds, err := oc.Builds(t.Namespace).List(labels.Everything(), fields.Everything())
			if err != nil {
				return nil, err
			}
			builds.Items = buildapi.FilterBuilds(builds.Items, buildapi.ByBuildConfigLabelPredicate(t.Name))
			if len(builds.Items) == 0 {
				return nil, fmt.Errorf("no builds found for %s", t.Name)
			}
			if bopts.Version != nil {
				// If a version has been specified, try to get the logs from that build.
				desired := buildutil.BuildNameForConfigVersion(t.Name, int(*bopts.Version))
				return oc.BuildLogs(t.Namespace).Get(desired, *bopts), nil
			}
			sort.Sort(sort.Reverse(buildapi.BuildSliceByCreationTimestamp(builds.Items)))
			return oc.BuildLogs(t.Namespace).Get(builds.Items[0].Name, *bopts), nil
		default:
			return kLogsForObjectFunc(object, options)
		}
	}
	w.Printer = func(mapping *meta.RESTMapping, noHeaders, withNamespace, wide bool, showAll bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
		return describe.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, columnLabels), nil
	}
	kCanBeExposed := w.Factory.CanBeExposed
	w.CanBeExposed = func(kind string) error {
		if kind == "DeploymentConfig" {
			return nil
		}
		return kCanBeExposed(kind)
	}
	kAttachablePodForObjectFunc := w.Factory.AttachablePodForObject
	w.AttachablePodForObject = func(object runtime.Object) (*api.Pod, error) {
		oc, kc, err := w.Clients()
		if err != nil {
			return nil, err
		}
		switch t := object.(type) {
		case *deployapi.DeploymentConfig:
			var err error
			var pods *api.PodList
			for pods == nil || len(pods.Items) == 0 {
				if t.Status.LatestVersion == 0 {
					time.Sleep(2 * time.Second)
				}
				if t, err = oc.DeploymentConfigs(t.Namespace).Get(t.Name); err != nil {
					return nil, err
				}
				latestDeploymentName := deployutil.LatestDeploymentNameForConfig(t)
				deployment, err := kc.ReplicationControllers(t.Namespace).Get(latestDeploymentName)
				if err != nil {
					if kerrors.IsNotFound(err) {
						continue
					}
					return nil, err
				}
				pods, err = kc.Pods(deployment.Namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything())
				if err != nil {
					return nil, err
				}
				if len(pods.Items) == 0 {
					time.Sleep(2 * time.Second)
				}
			}
			var oldestPod *api.Pod
			for i := range pods.Items {
				// Take the address of the slice element rather than the range
				// variable, which is reused on every iteration.
				pod := &pods.Items[i]
				if oldestPod == nil || pod.CreationTimestamp.Before(oldestPod.CreationTimestamp) {
					oldestPod = pod
				}
			}
			return oldestPod, nil
		default:
			return kAttachablePodForObjectFunc(object)
		}
	}

	return w
}
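A minimal usage sketch for the LogsForObject override above. It assumes the same package scope as the example (plus an io import), that the wrapper type is named Factory, and that the old unversioned client's Request exposes the usual Stream() method; the helper name fetchDeploymentLogs is hypothetical.

// fetchDeploymentLogs sketches how a caller might resolve a log request for a
// deployment config through the wrapped factory and copy the result to out.
// Assumes the example's package scope; Request.Stream() is assumed available.
func fetchDeploymentLogs(w *Factory, dc *deployapi.DeploymentConfig, out io.Writer) error {
	req, err := w.LogsForObject(dc, &deployapi.DeploymentLogOptions{})
	if err != nil {
		return err
	}
	body, err := req.Stream()
	if err != nil {
		return err
	}
	defer body.Close()
	_, err = io.Copy(out, body)
	return err
}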
Example #25
0
func (o RetryOptions) Run() error {
	allErrs := []error{}
	mapping, err := o.Mapper.RESTMapping(kapi.Kind("ReplicationController"))
	if err != nil {
		return err
	}
	for _, info := range o.Infos {
		config, ok := info.Object.(*deployapi.DeploymentConfig)
		if !ok {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("expected deployment configuration, got %T", info.Object)))
			continue
		}
		if config.Spec.Paused {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to retry paused deployment config %q", config.Name)))
			continue
		}
		if config.Status.LatestVersion == 0 {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("no rollouts found for %q", config.Name)))
			continue
		}

		latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
		rc, err := o.Clientset.ReplicationControllers(config.Namespace).Get(latestDeploymentName)
		if err != nil {
			if kerrors.IsNotFound(err) {
				allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to find the latest rollout (#%d).\nYou can start a new rollout with 'oc rollout latest dc/%s'.", config.Status.LatestVersion, config.Name)))
				continue
			}
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to fetch replication controller %q", config.Name)))
			continue
		}

		if !deployutil.IsFailedDeployment(rc) {
			message := fmt.Sprintf("rollout #%d is %s; only failed deployments can be retried.\n", config.Status.LatestVersion, strings.ToLower(string(deployutil.DeploymentStatusFor(rc))))
			if deployutil.IsCompleteDeployment(rc) {
				message += fmt.Sprintf("You can start a new deployment with 'oc rollout latest dc/%s'.", config.Name)
			} else {
				message += fmt.Sprintf("Optionally, you can cancel this deployment with 'oc rollout cancel dc/%s'.", config.Name)
			}
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, errors.New(message)))
			continue
		}

		// Delete the deployer pod as well as the deployment hooks pods, if any
		pods, err := o.Clientset.Pods(config.Namespace).List(kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(latestDeploymentName)})
		if err != nil {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("failed to list deployer/hook pods for deployment #%d: %v", config.Status.LatestVersion, err)))
			continue
		}
		hasError := false
		for _, pod := range pods.Items {
			err := o.Clientset.Pods(pod.Namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
			if err != nil {
				allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.Status.LatestVersion, err)))
				hasError = true
			}
		}
		if hasError {
			continue
		}

		patches := set.CalculatePatches([]*resource.Info{{Object: rc, Mapping: mapping}}, o.Encoder, func(*resource.Info) (bool, error) {
			rc.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
			delete(rc.Annotations, deployapi.DeploymentStatusReasonAnnotation)
			delete(rc.Annotations, deployapi.DeploymentCancelledAnnotation)
			return true, nil
		})

		if len(patches) == 0 {
			kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, "already retried")
			continue
		}

		if _, err := o.Clientset.ReplicationControllers(rc.Namespace).Patch(rc.Name, kapi.StrategicMergePatchType, patches[0].Patch); err != nil {
			allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, err))
			continue
		}
		kcmdutil.PrintSuccess(o.Mapper, false, o.Out, info.Mapping.Resource, info.Name, false, fmt.Sprintf("retried rollout #%d", config.Status.LatestVersion))
	}

	return utilerrors.NewAggregate(allErrs)
}
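The mutation closure passed to CalculatePatches above is the heart of the retry. For reference, a hypothetical standalone helper performing the same annotation flip (with a nil-map guard the original omits) might look like this, under the same package assumptions:

// markDeploymentRetryable resets a failed deployer RC so the deployment
// controller picks it up again: the status annotation goes back to New, and
// the failure-reason and cancellation markers are cleared. The helper name is
// illustrative; the annotation keys are the ones used in the example above.
func markDeploymentRetryable(rc *kapi.ReplicationController) {
	if rc.Annotations == nil {
		rc.Annotations = map[string]string{}
	}
	rc.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusNew)
	delete(rc.Annotations, deployapi.DeploymentStatusReasonAnnotation)
	delete(rc.Annotations, deployapi.DeploymentCancelledAnnotation)
}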
Example #26
0
// Describe returns the description of a DeploymentConfig
func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) {
	var deploymentConfig *deployapi.DeploymentConfig
	if d.config != nil {
		// If a deployment config is already provided use that.
		// This is used by `oc rollback --dry-run`.
		deploymentConfig = d.config
	} else {
		var err error
		deploymentConfig, err = d.osClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return "", err
		}
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, deploymentConfig.ObjectMeta)
		var (
			deploymentsHistory   []kapi.ReplicationController
			activeDeploymentName string
		)

		if d.config == nil {
			if rcs, err := d.kubeClient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(deploymentConfig.Name)}); err == nil {
				deploymentsHistory = rcs.Items
			}
		}

		if deploymentConfig.Status.LatestVersion == 0 {
			formatString(out, "Latest Version", "Not deployed")
		} else {
			formatString(out, "Latest Version", strconv.FormatInt(deploymentConfig.Status.LatestVersion, 10))
		}

		printDeploymentConfigSpec(d.kubeClient, *deploymentConfig, out)
		fmt.Fprintln(out)

		latestDeploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig)
		if activeDeployment := deployutil.ActiveDeployment(deploymentConfig, deploymentsHistory); activeDeployment != nil {
			activeDeploymentName = activeDeployment.Name
		}

		var deployment *kapi.ReplicationController
		isNotDeployed := len(deploymentsHistory) == 0
		for i := range deploymentsHistory {
			if deploymentsHistory[i].Name == latestDeploymentName {
				// Point at the slice element rather than the range variable so the
				// match is not overwritten by later iterations.
				deployment = &deploymentsHistory[i]
				break
			}
		}

		// deployment can still be nil if the latest RC has not been created yet;
		// guard the dereferences below rather than panicking.
		if deployment == nil {
			formatString(out, "Latest Deployment", "<none>")
		} else {
			header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment))
			// Show details if the current deployment is the active one or it is the
			// initial deployment.
			printDeploymentRc(deployment, d.kubeClient, out, header, (deployment.Name == activeDeploymentName) || len(deploymentsHistory) == 1)
		}

		// We don't show the deployment history when running `oc rollback --dry-run`.
		if d.config == nil && !isNotDeployed {
			sorted := deploymentsHistory
			sort.Sort(sort.Reverse(rcutils.OverlappingControllers(sorted)))
			counter := 1
			for _, item := range sorted {
				if item.Name != latestDeploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) {
					header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item))
					printDeploymentRc(&item, d.kubeClient, out, header, item.Name == activeDeploymentName)
					counter++
				}
				if counter == maxDisplayDeployments {
					break
				}
			}
		}

		if settings.ShowEvents {
			// Events
			if events, err := d.kubeClient.Events(deploymentConfig.Namespace).Search(deploymentConfig); err == nil && events != nil {
				latestDeploymentEvents := &kapi.EventList{Items: []kapi.Event{}}
				for i := len(events.Items); i != 0 && i > len(events.Items)-maxDisplayDeploymentsEvents; i-- {
					latestDeploymentEvents.Items = append(latestDeploymentEvents.Items, events.Items[i-1])
				}
				fmt.Fprintln(out)
				kctl.DescribeEvents(latestDeploymentEvents, out)
			}
		}
		return nil
	})
}
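The event-trimming loop near the end of Describe is terse; it keeps the newest maxDisplayDeploymentsEvents entries in newest-first order. An equivalent hypothetical helper, under the same package assumptions:

// lastEventsNewestFirst returns up to max of the most recent events, newest
// first, mirroring the trimming loop in Describe above.
func lastEventsNewestFirst(events *kapi.EventList, max int) *kapi.EventList {
	trimmed := &kapi.EventList{Items: []kapi.Event{}}
	for i := len(events.Items); i != 0 && i > len(events.Items)-max; i-- {
		trimmed.Items = append(trimmed.Items, events.Items[i-1])
	}
	return trimmed
}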
Example #27
0
// Describe returns the description of a DeploymentConfig
func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) {
	var deploymentConfig *deployapi.DeploymentConfig
	if d.config != nil {
		// If a deployment config is already provided use that.
		// This is used by `oc rollback --dry-run`.
		deploymentConfig = d.config
	} else {
		var err error
		deploymentConfig, err = d.osClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return "", err
		}
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, deploymentConfig.ObjectMeta)

		if deploymentConfig.Status.LatestVersion == 0 {
			formatString(out, "Latest Version", "Not deployed")
		} else {
			formatString(out, "Latest Version", strconv.FormatInt(deploymentConfig.Status.LatestVersion, 10))
		}

		printDeploymentConfigSpec(d.kubeClient, *deploymentConfig, out)
		fmt.Fprintln(out)

		if deploymentConfig.Status.Details != nil && len(deploymentConfig.Status.Details.Message) > 0 {
			fmt.Fprintf(out, "Warning:\t%s\n", deploymentConfig.Status.Details.Message)
		}

		deploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig)
		deployment, err := d.kubeClient.ReplicationControllers(namespace).Get(deploymentName)
		if err != nil {
			if kerrors.IsNotFound(err) {
				formatString(out, "Latest Deployment", "<none>")
			} else {
				formatString(out, "Latest Deployment", fmt.Sprintf("error: %v", err))
			}
		} else {
			header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment))
			printDeploymentRc(deployment, d.kubeClient, out, header, true)
		}
		// We don't show the deployment history when running `oc rollback --dry-run`.
		if d.config == nil {
			deploymentsHistory, err := d.kubeClient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: labels.Everything()})
			if err == nil {
				sorted := deploymentsHistory.Items
				sort.Sort(sort.Reverse(rcutils.OverlappingControllers(sorted)))
				counter := 1
				for _, item := range sorted {
					if item.Name != deploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) {
						header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item))
						printDeploymentRc(&item, d.kubeClient, out, header, false)
						counter++
					}
					if counter == maxDisplayDeployments {
						break
					}
				}
			}
		}

		if settings.ShowEvents {
			// Events
			if events, err := d.kubeClient.Events(deploymentConfig.Namespace).Search(deploymentConfig); err == nil && events != nil {
				latestDeploymentEvents := &kapi.EventList{Items: []kapi.Event{}}
				for i := len(events.Items); i != 0 && i > len(events.Items)-maxDisplayDeploymentsEvents; i-- {
					latestDeploymentEvents.Items = append(latestDeploymentEvents.Items, events.Items[i-1])
				}
				fmt.Fprintln(out)
				kctl.DescribeEvents(latestDeploymentEvents, out)
			}
		}
		return nil
	})
}
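A hypothetical invocation of the describer (applicable to either variant above), assuming the same package so the unexported fields are reachable; only the field names osClient and kubeClient appear in the examples, so the concrete interface types used here are assumptions.

// describeConfig sketches how other code in the same package might drive the
// describer. The client interface types are assumed for illustration.
func describeConfig(oc client.Interface, kc kclient.Interface, namespace, name string) (string, error) {
	d := &DeploymentConfigDescriber{osClient: oc, kubeClient: kc}
	return d.Describe(namespace, name, kctl.DescriberSettings{ShowEvents: true})
}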