// Describe returns the description of a DeploymentConfig
func (d *DeploymentConfigDescriber) Describe(namespace, name string) (string, error) {
	deploymentConfig, err := d.client.getDeploymentConfig(namespace, name)
	if err != nil {
		return "", err
	}
	events, err := d.client.listEvents(deploymentConfig)
	if err != nil {
		return "", err
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, deploymentConfig.ObjectMeta)

		if deploymentConfig.Status.LatestVersion == 0 {
			formatString(out, "Latest Version", "Not deployed")
		} else {
			formatString(out, "Latest Version", strconv.Itoa(deploymentConfig.Status.LatestVersion))
		}

		printTriggers(deploymentConfig.Spec.Triggers, out)

		formatString(out, "Strategy", deploymentConfig.Spec.Strategy.Type)
		printStrategy(deploymentConfig.Spec.Strategy, out)
		printDeploymentConfigSpec(deploymentConfig.Spec, out)

		if deploymentConfig.Status.Details != nil && len(deploymentConfig.Status.Details.Message) > 0 {
			fmt.Fprintf(out, "Warning:\t%s\n", deploymentConfig.Status.Details.Message)
		}

		deploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig)
		deployment, err := d.client.getDeployment(namespace, deploymentName)
		if err != nil {
			if kerrors.IsNotFound(err) {
				formatString(out, "Latest Deployment", "<none>")
			} else {
				formatString(out, "Latest Deployment", fmt.Sprintf("error: %v", err))
			}
		} else {
			header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment))
			printDeploymentRc(deployment, d.client, out, header, true)
		}

		deploymentsHistory, err := d.client.listDeployments(namespace, labels.Everything())
		if err == nil {
			sorted := rcSorter{}
			sorted = append(sorted, deploymentsHistory.Items...)
			sort.Sort(sorted)
			for _, item := range sorted {
				if item.Name != deploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) {
					header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item))
					printDeploymentRc(&item, d.client, out, header, false)
				}
			}
		}

		if events != nil {
			kctl.DescribeEvents(events, out)
		}

		return nil
	})
}
func TestTriggers_manual(t *testing.T) {
	testutil.DeleteAllEtcdKeys()
	openshift := NewTestDeployOpenshift(t)
	defer openshift.Close()

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = testutil.Namespace()
	config.Triggers = []deployapi.DeploymentTriggerPolicy{
		{
			Type: deployapi.DeploymentTriggerManual,
		},
	}

	dc, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	watch, err := openshift.KubeClient.ReplicationControllers(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), dc.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments: %v", err)
	}
	defer watch.Stop()

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		config, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Generate(config.Name)
		if err != nil {
			return err
		}
		if config.LatestVersion != 1 {
			t.Fatalf("Generated deployment should have version 1: %#v", config)
		}
		t.Logf("config(1): %#v", config)
		updatedConfig, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Update(config)
		if err != nil {
			return err
		}
		t.Logf("config(2): %#v", updatedConfig)
		return nil
	})
	if retryErr != nil {
		// Report the retry error itself (the outer err is already nil here).
		t.Fatal(retryErr)
	}

	event := <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}
	if e, a := 1, deployutil.DeploymentVersionFor(deployment); e != a {
		t.Fatalf("Deployment annotation version does not match: %#v", deployment)
	}
}
// DeploymentByDeploymentConfigIndexFunc indexes Deployment items by their
// associated DeploymentConfig; if there is none, it indexes under the key "orphan".
func DeploymentByDeploymentConfigIndexFunc(obj interface{}) ([]string, error) {
	controller, ok := obj.(*kapi.ReplicationController)
	if !ok {
		return nil, fmt.Errorf("not a replication controller: %v", obj)
	}
	name := deployutil.DeploymentConfigNameFor(controller)
	if len(name) == 0 {
		return []string{"orphan"}, nil
	}
	return []string{controller.Namespace + "/" + name}, nil
}
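// Illustrative usage sketch (not part of the original source): an index
// function like the one above is meant to be registered with a client-go
// style Indexer. The index name "byDeploymentConfig", the helper name below,
// and the import alias kcache for the Kubernetes cache package are
// assumptions for illustration only.
func newDeploymentIndexer() kcache.Indexer {
	return kcache.NewIndexer(kcache.MetaNamespaceKeyFunc, kcache.Indexers{
		"byDeploymentConfig": DeploymentByDeploymentConfigIndexFunc,
	})
}

// Deployments for a config can then be looked up by the "namespace/name"
// key the index function produces, e.g.:
//
//	indexer := newDeploymentIndexer()
//	items, err := indexer.ByIndex("byDeploymentConfig", "myproject/frontend")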
// GetConfigForController returns the managing deployment config for the
// provided replication controller.
func (s *StoreToDeploymentConfigLister) GetConfigForController(rc *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
	dcName := deployutil.DeploymentConfigNameFor(rc)
	obj, exists, err := s.Indexer.GetByKey(rc.Namespace + "/" + dcName)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, fmt.Errorf("deployment config %q not found", dcName)
	}
	return obj.(*deployapi.DeploymentConfig), nil
}
// GetConfigForPod returns the managing deployment config for the provided pod.
func (s *StoreToDeploymentConfigLister) GetConfigForPod(pod *kapi.Pod) (*deployapi.DeploymentConfig, error) {
	dcName := deployutil.DeploymentConfigNameFor(pod)
	obj, exists, err := s.Indexer.GetByKey(pod.Namespace + "/" + dcName)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, kapierrors.NewNotFound(deployapi.Resource("deploymentconfig"), dcName)
	}
	return obj.(*deployapi.DeploymentConfig), nil
}
// GetDeploymentConfig gets the configuration for the given deployment
func (d *dataSet) GetDeploymentConfig(controller *kapi.ReplicationController) (*deployapi.DeploymentConfig, bool, error) {
	name := deployutil.DeploymentConfigNameFor(controller)
	if len(name) == 0 {
		return nil, false, nil
	}

	var deploymentConfig *deployapi.DeploymentConfig
	key := &deployapi.DeploymentConfig{ObjectMeta: kapi.ObjectMeta{Name: name, Namespace: controller.Namespace}}
	item, exists, err := d.deploymentConfigStore.Get(key)
	if exists {
		deploymentConfig = item.(*deployapi.DeploymentConfig)
	}

	return deploymentConfig, exists, err
}
// FilterDeploymentsPredicate is a function that returns true if the
// replication controller is associated with a DeploymentConfig.
func FilterDeploymentsPredicate(item *kapi.ReplicationController) bool {
	return len(deployutil.DeploymentConfigNameFor(item)) > 0
}
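// Illustrative helper (an assumption, not in the original source) showing
// FilterDeploymentsPredicate in use: it keeps only the replication
// controllers that belong to some DeploymentConfig.
func filterManagedDeployments(rcs []kapi.ReplicationController) []*kapi.ReplicationController {
	managed := []*kapi.ReplicationController{}
	for i := range rcs {
		if FilterDeploymentsPredicate(&rcs[i]) {
			managed = append(managed, &rcs[i])
		}
	}
	return managed
}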
func TestTriggers_manual(t *testing.T) {
	const namespace = "test-triggers-manual"
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	checkErr(t, err)
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "my-test-user")
	checkErr(t, err)
	osClient, kubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "my-test-user")
	checkErr(t, err)

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = namespace
	config.Spec.Triggers = []deployapi.DeploymentTriggerPolicy{
		{
			Type: deployapi.DeploymentTriggerManual,
		},
	}

	dc, err := osClient.DeploymentConfigs(namespace).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	rcWatch, err := kubeClient.ReplicationControllers(namespace).Watch(labels.Everything(), fields.Everything(), dc.ResourceVersion)
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments: %v", err)
	}
	defer rcWatch.Stop()

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		config, err := osClient.DeploymentConfigs(namespace).Generate(config.Name)
		if err != nil {
			return err
		}
		if config.Status.LatestVersion != 1 {
			t.Fatalf("Generated deployment should have version 1: %#v", config)
		}
		t.Logf("config(1): %#v", config)
		updatedConfig, err := osClient.DeploymentConfigs(namespace).Update(config)
		if err != nil {
			return err
		}
		t.Logf("config(2): %#v", updatedConfig)
		return nil
	})
	if retryErr != nil {
		// Report the retry error itself (the outer err is already nil here).
		t.Fatal(retryErr)
	}

	event := <-rcWatch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}
	if e, a := 1, deployutil.DeploymentVersionFor(deployment); e != a {
		t.Fatalf("Deployment annotation version does not match: %#v", deployment)
	}
}
func TestTriggers_configChange(t *testing.T) {
	const namespace = "test-triggers-configchange"
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	checkErr(t, err)
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	checkErr(t, err)
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	checkErr(t, err)
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "my-test-user")
	checkErr(t, err)
	osClient, kubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "my-test-user")
	checkErr(t, err)

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = namespace
	config.Spec.Triggers[0] = deploytest.OkConfigChangeTrigger()

	rcWatch, err := kubeClient.ReplicationControllers(namespace).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments %v", err)
	}
	defer rcWatch.Stop()

	// submit the initial deployment config
	if _, err := osClient.DeploymentConfigs(namespace).Create(config); err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v", err)
	}

	// verify the initial deployment exists
	event := <-rcWatch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}

	assertEnvVarEquals("ENV1", "VAL1", deployment, t)

	retryErr := kclient.RetryOnConflict(wait.Backoff{Steps: maxUpdateRetries}, func() error {
		// submit a new config with an updated environment variable
		config, err := osClient.DeploymentConfigs(namespace).Generate(config.Name)
		if err != nil {
			return err
		}

		config.Spec.Template.Spec.Containers[0].Env[0].Value = "UPDATED"

		// before we update the config, we need to update the state of the existing deployment
		// this is required to be done manually since the deployment and deployer pod controllers are not run in this test
		// get this live or conflicts will never end up resolved
		liveDeployment, err := kubeClient.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
		if err != nil {
			return err
		}
		liveDeployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)

		// update the deployment
		if _, err := kubeClient.ReplicationControllers(namespace).Update(liveDeployment); err != nil {
			return err
		}

		event = <-rcWatch.ResultChan()
		if e, a := watchapi.Modified, event.Type; e != a {
			t.Fatalf("expected watch event type %s, got %s", e, a)
		}

		if _, err := osClient.DeploymentConfigs(namespace).Update(config); err != nil {
			return err
		}

		return nil
	})
	if retryErr != nil {
		t.Fatal(retryErr)
	}

	var newDeployment *kapi.ReplicationController
	for {
		event = <-rcWatch.ResultChan()
		if event.Type != watchapi.Added {
			// Discard modifications which could be applied to the original RC, etc.
			continue
		}
		newDeployment = event.Object.(*kapi.ReplicationController)
		break
	}

	assertEnvVarEquals("ENV1", "UPDATED", newDeployment, t)

	if newDeployment.Name == deployment.Name {
		t.Fatalf("expected new deployment; old=%s, new=%s", deployment.Name, newDeployment.Name)
	}
}
// Describe returns the description of a DeploymentConfig
func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) {
	var deploymentConfig *deployapi.DeploymentConfig
	if d.config != nil {
		// If a deployment config is already provided use that.
		// This is used by `oc rollback --dry-run`.
		deploymentConfig = d.config
	} else {
		var err error
		deploymentConfig, err = d.osClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return "", err
		}
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, deploymentConfig.ObjectMeta)

		var (
			deploymentsHistory   []kapi.ReplicationController
			activeDeploymentName string
		)

		if d.config == nil {
			if rcs, err := d.kubeClient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(deploymentConfig.Name)}); err == nil {
				deploymentsHistory = rcs.Items
			}
		}

		if deploymentConfig.Status.LatestVersion == 0 {
			formatString(out, "Latest Version", "Not deployed")
		} else {
			formatString(out, "Latest Version", strconv.FormatInt(deploymentConfig.Status.LatestVersion, 10))
		}

		printDeploymentConfigSpec(d.kubeClient, *deploymentConfig, out)
		fmt.Fprintln(out)

		latestDeploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig)
		if activeDeployment := deployutil.ActiveDeployment(deploymentConfig, deploymentsHistory); activeDeployment != nil {
			activeDeploymentName = activeDeployment.Name
		}

		var deployment *kapi.ReplicationController
		isNotDeployed := len(deploymentsHistory) == 0
		// Index into the slice rather than taking the address of the loop
		// variable, which would alias the last element iterated over.
		for i := range deploymentsHistory {
			if deploymentsHistory[i].Name == latestDeploymentName {
				deployment = &deploymentsHistory[i]
			}
		}

		if isNotDeployed {
			formatString(out, "Latest Deployment", "<none>")
		} else {
			header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment))
			// Show details if the current deployment is the active one or it is the
			// initial deployment.
			printDeploymentRc(deployment, d.kubeClient, out, header, (deployment.Name == activeDeploymentName) || len(deploymentsHistory) == 1)
		}

		// We don't show the deployment history when running `oc rollback --dry-run`.
		if d.config == nil && !isNotDeployed {
			sorted := deploymentsHistory
			sort.Sort(sort.Reverse(rcutils.OverlappingControllers(sorted)))
			counter := 1
			for _, item := range sorted {
				if item.Name != latestDeploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) {
					header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item))
					printDeploymentRc(&item, d.kubeClient, out, header, item.Name == activeDeploymentName)
					counter++
				}
				if counter == maxDisplayDeployments {
					break
				}
			}
		}

		if settings.ShowEvents {
			// Events
			if events, err := d.kubeClient.Events(deploymentConfig.Namespace).Search(deploymentConfig); err == nil && events != nil {
				latestDeploymentEvents := &kapi.EventList{Items: []kapi.Event{}}
				for i := len(events.Items); i != 0 && i > len(events.Items)-maxDisplayDeploymentsEvents; i-- {
					latestDeploymentEvents.Items = append(latestDeploymentEvents.Items, events.Items[i-1])
				}
				fmt.Fprintln(out)
				kctl.DescribeEvents(latestDeploymentEvents, out)
			}
		}

		return nil
	})
}
// BelongsToDeploymentConfig returns true if the provided replication
// controller is annotated with the name of the given deployment config.
func BelongsToDeploymentConfig(config *deployapi.DeploymentConfig, b *kapi.ReplicationController) bool {
	if b.Annotations != nil {
		return config.Name == deployutil.DeploymentConfigNameFor(b)
	}
	return false
}
func (c *DeployerPodController) cleanupFailedDeployment(deployment *kapi.ReplicationController) error {
	// Scale down the current failed deployment
	configName := deployutil.DeploymentConfigNameFor(deployment)
	existingDeployments, err := c.deploymentClient.listDeploymentsForConfig(deployment.Namespace, configName)
	if err != nil {
		return fmt.Errorf("couldn't list Deployments for DeploymentConfig %s: %v", configName, err)
	}

	desiredReplicas, ok := deployutil.DeploymentDesiredReplicas(deployment)
	if !ok {
		// if desired replicas could not be found, then log the error
		// and update the failed deployment
		// this cannot be treated as a transient error
		kutil.HandleError(fmt.Errorf("Could not determine desired replicas from %s to reset replicas for last completed deployment", deployutil.LabelForDeployment(deployment)))
	}

	if ok && len(existingDeployments.Items) > 0 {
		sort.Sort(deployutil.DeploymentsByLatestVersionDesc(existingDeployments.Items))
		for index, existing := range existingDeployments.Items {
			// if a newer deployment exists:
			// - set the replicas for the current failed deployment to 0
			// - there is no point in scaling up the last completed deployment
			//   since that will be scaled down by the later deployment
			if index == 0 && existing.Name != deployment.Name {
				break
			}

			// the latest completed deployment is the one that needs to be scaled back up
			if deployutil.DeploymentStatusFor(&existing) == deployapi.DeploymentStatusComplete {
				if existing.Spec.Replicas == desiredReplicas {
					break
				}

				// scale back the completed deployment to the target of the failed deployment
				existing.Spec.Replicas = desiredReplicas
				if _, err := c.deploymentClient.updateDeployment(existing.Namespace, &existing); err != nil {
					if kerrors.IsNotFound(err) {
						return nil
					}
					return fmt.Errorf("couldn't update replicas to %d for deployment %s: %v", desiredReplicas, deployutil.LabelForDeployment(&existing), err)
				}
				glog.V(4).Infof("Updated replicas to %d for deployment %s", desiredReplicas, deployutil.LabelForDeployment(&existing))
				break
			}
		}
	}

	// set the replicas for the failed deployment to 0
	// and set the status to Failed
	deployment.Spec.Replicas = 0
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusFailed)
	if _, err := c.deploymentClient.updateDeployment(deployment.Namespace, deployment); err != nil {
		if kerrors.IsNotFound(err) {
			return nil
		}
		return fmt.Errorf("couldn't scale down the deployment %s and mark it as failed: %v", deployutil.LabelForDeployment(deployment), err)
	}
	glog.V(4).Infof("Scaled down the deployment %s and marked it as failed", deployutil.LabelForDeployment(deployment))

	return nil
}
// Handle syncs pod's status with any associated deployment.
func (c *DeployerPodController) Handle(pod *kapi.Pod) error {
	// Find the deployment associated with the deployer pod.
	deploymentName := deployutil.DeploymentNameFor(pod)
	if len(deploymentName) == 0 {
		return nil
	}
	// Reject updates to anything but the main deployer pod
	// TODO: Find a way to filter this on the watch side.
	if pod.Name != deployutil.DeployerPodNameForDeployment(deploymentName) {
		return nil
	}

	deployment := &kapi.ReplicationController{ObjectMeta: kapi.ObjectMeta{Namespace: pod.Namespace, Name: deploymentName}}
	cached, exists, err := c.store.Get(deployment)
	if err == nil && exists {
		// Try to use the cache first. Trust hits and return them.
		deployment = cached.(*kapi.ReplicationController)
	} else {
		// Double-check with the master for cache misses/errors, since those
		// are rare and API calls are expensive but more reliable.
		deployment, err = c.kClient.ReplicationControllers(pod.Namespace).Get(deploymentName)
	}

	// If the deployment for this pod has disappeared, we should clean up this
	// and any other deployer pods, then bail out.
	if err != nil {
		// Some retrieval error occurred. Retry.
		if !kerrors.IsNotFound(err) {
			return fmt.Errorf("couldn't get deployment %s/%s which owns deployer pod %s/%s", pod.Namespace, deploymentName, pod.Namespace, pod.Name)
		}
		// Find all the deployer pods for the deployment (including this one).
		opts := kapi.ListOptions{LabelSelector: deployutil.DeployerPodSelector(deploymentName)}
		deployers, err := c.kClient.Pods(pod.Namespace).List(opts)
		if err != nil {
			// Retry.
			return fmt.Errorf("couldn't get deployer pods for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// Delete all deployers.
		for _, deployer := range deployers.Items {
			err := c.kClient.Pods(deployer.Namespace).Delete(deployer.Name, kapi.NewDeleteOptions(0))
			if err != nil {
				if !kerrors.IsNotFound(err) {
					// TODO: Should this fire an event?
					glog.V(2).Infof("Couldn't delete orphaned deployer pod %s/%s: %v", deployer.Namespace, deployer.Name, err)
				}
			} else {
				// TODO: Should this fire an event?
				glog.V(2).Infof("Deleted orphaned deployer pod %s/%s", deployer.Namespace, deployer.Name)
			}
		}
		return nil
	}

	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus

	switch pod.Status.Phase {
	case kapi.PodRunning:
		nextStatus = deployapi.DeploymentStatusRunning

	case kapi.PodSucceeded:
		nextStatus = deployapi.DeploymentStatusComplete

		config, decodeErr := c.decodeConfig(deployment)
		// If the deployment was cancelled just prior to the deployer pod succeeding
		// then we need to remove the cancel annotations from the complete deployment
		// and emit an event letting users know their cancellation failed.
		if deployutil.IsDeploymentCancelled(deployment) {
			delete(deployment.Annotations, deployapi.DeploymentCancelledAnnotation)
			delete(deployment.Annotations, deployapi.DeploymentStatusReasonAnnotation)

			if decodeErr == nil {
				c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCancellation", "Deployment %q succeeded before cancel recorded", deployutil.LabelForDeployment(deployment))
			} else {
				c.recorder.Event(deployment, kapi.EventTypeWarning, "FailedCancellation", "Succeeded before cancel recorded")
			}
		}

		// Sync the internal replica annotation with the target so that we can
		// distinguish deployer updates from other scaling events.
		deployment.Annotations[deployapi.DeploymentReplicasAnnotation] = deployment.Annotations[deployapi.DesiredReplicasAnnotation]
		if nextStatus == deployapi.DeploymentStatusComplete {
			delete(deployment.Annotations, deployapi.DesiredReplicasAnnotation)
		}

		// reset the size of any test container, since we are the ones updating the RC
		if decodeErr == nil && config.Spec.Test {
			deployment.Spec.Replicas = 0
		}

	case kapi.PodFailed:
		nextStatus = deployapi.DeploymentStatusFailed

		// reset the size of any test container, since we are the ones updating the RC
		if config, err := c.decodeConfig(deployment); err == nil && config.Spec.Test {
			deployment.Spec.Replicas = 0
		}
	}

	if deployutil.CanTransitionPhase(currentStatus, nextStatus) {
		deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
		if _, err := c.kClient.ReplicationControllers(deployment.Namespace).Update(deployment); err != nil {
			if kerrors.IsNotFound(err) {
				return nil
			}
			return fmt.Errorf("couldn't update Deployment %s to status %s: %v", deployutil.LabelForDeployment(deployment), nextStatus, err)
		}
		glog.V(4).Infof("Updated deployment %s status from %s to %s (scale: %d)", deployutil.LabelForDeployment(deployment), currentStatus, nextStatus, deployment.Spec.Replicas)

		// If the deployment was canceled, trigger a reconciliation of its deployment config
		// so that the latest complete deployment can immediately roll back in place of the
		// canceled deployment.
		if nextStatus == deployapi.DeploymentStatusFailed && deployutil.IsDeploymentCancelled(deployment) {
			// If we are unable to get the deployment config, then the deploymentconfig controller will
			// perform its duties once the resync interval forces the deploymentconfig to be reconciled.
			name := deployutil.DeploymentConfigNameFor(deployment)
			kclient.RetryOnConflict(kclient.DefaultRetry, func() error {
				config, err := c.client.DeploymentConfigs(deployment.Namespace).Get(name)
				if err != nil {
					return err
				}
				if config.Annotations == nil {
					config.Annotations = make(map[string]string)
				}
				config.Annotations[deployapi.DeploymentCancelledAnnotation] = strconv.Itoa(config.Status.LatestVersion)
				_, err = c.client.DeploymentConfigs(config.Namespace).Update(config)
				return err
			})
		}
	}
	return nil
}
// Run performs a rollback.
func (o *RollbackOptions) Run() error {
	// Get the resource referenced in the command args.
	obj, err := o.findResource(o.TargetName)
	if err != nil {
		return err
	}

	// Interpret the resource to resolve a target for rollback.
	var target *kapi.ReplicationController
	switch r := obj.(type) {
	case *kapi.ReplicationController:
		dcName := deployutil.DeploymentConfigNameFor(r)
		dc, err := o.oc.DeploymentConfigs(r.Namespace).Get(dcName)
		if err != nil {
			return err
		}
		if dc.Spec.Paused {
			return fmt.Errorf("cannot rollback a paused deployment config")
		}
		// A specific deployment was used.
		target = r
	case *deployapi.DeploymentConfig:
		if r.Spec.Paused {
			return fmt.Errorf("cannot rollback a paused deployment config")
		}
		// A deploymentconfig was used. Find the target deployment by the
		// specified version, or by a lookup of the last completed deployment if
		// no version was supplied.
		deployment, err := o.findTargetDeployment(r, o.DesiredVersion)
		if err != nil {
			return err
		}
		target = deployment
	}
	if target == nil {
		return fmt.Errorf("%s is not a valid deployment or deployment config", o.TargetName)
	}

	// Set up the rollback and generate a new rolled back config.
	rollback := &deployapi.DeploymentConfigRollback{
		Spec: deployapi.DeploymentConfigRollbackSpec{
			From: kapi.ObjectReference{
				Name: target.Name,
			},
			IncludeTemplate:        true,
			IncludeTriggers:        o.IncludeTriggers,
			IncludeStrategy:        o.IncludeStrategy,
			IncludeReplicationMeta: o.IncludeScalingSettings,
		},
	}
	newConfig, err := o.oc.DeploymentConfigs(o.Namespace).Rollback(rollback)
	if err != nil {
		return err
	}

	// If this is a dry run, print and exit.
	if o.DryRun {
		describer := describe.NewDeploymentConfigDescriber(o.oc, o.kc, newConfig)
		description, err := describer.Describe(newConfig.Namespace, newConfig.Name, kubectl.DescriberSettings{})
		if err != nil {
			return err
		}
		o.out.Write([]byte(description))
		return nil
	}

	// If an output format is specified, print and exit.
	if len(o.Format) > 0 {
		printer, _, err := kubectl.GetPrinter(o.Format, o.Template)
		if err != nil {
			return err
		}
		versionedPrinter := kubectl.NewVersionedPrinter(printer, kapi.Scheme, latest.Version)
		versionedPrinter.PrintObj(newConfig, o.out)
		return nil
	}

	// Perform a real rollback.
	rolledback, err := o.oc.DeploymentConfigs(newConfig.Namespace).Update(newConfig)
	if err != nil {
		return err
	}

	// Print warnings about any image triggers disabled during the rollback.
	fmt.Fprintf(o.out, "#%d rolled back to %s\n", rolledback.Status.LatestVersion, rollback.Spec.From.Name)
	for _, trigger := range rolledback.Spec.Triggers {
		disabled := []string{}
		if trigger.Type == deployapi.DeploymentTriggerOnImageChange && !trigger.ImageChangeParams.Automatic {
			disabled = append(disabled, trigger.ImageChangeParams.From.Name)
		}
		if len(disabled) > 0 {
			reenable := fmt.Sprintf("oc deploy %s --enable-triggers -n %s", rolledback.Name, o.Namespace)
			fmt.Fprintf(o.out, "Warning: the following image triggers were disabled: %s\n You can re-enable them with: %s\n", strings.Join(disabled, ","), reenable)
		}
	}

	return nil
}
// Describe returns the description of a DeploymentConfig
func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings kctl.DescriberSettings) (string, error) {
	var deploymentConfig *deployapi.DeploymentConfig
	if d.config != nil {
		// If a deployment config is already provided use that.
		// This is used by `oc rollback --dry-run`.
		deploymentConfig = d.config
	} else {
		var err error
		deploymentConfig, err = d.osClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return "", err
		}
	}

	return tabbedString(func(out *tabwriter.Writer) error {
		formatMeta(out, deploymentConfig.ObjectMeta)

		if deploymentConfig.Status.LatestVersion == 0 {
			formatString(out, "Latest Version", "Not deployed")
		} else {
			formatString(out, "Latest Version", strconv.FormatInt(deploymentConfig.Status.LatestVersion, 10))
		}

		printDeploymentConfigSpec(d.kubeClient, *deploymentConfig, out)
		fmt.Fprintln(out)

		if deploymentConfig.Status.Details != nil && len(deploymentConfig.Status.Details.Message) > 0 {
			fmt.Fprintf(out, "Warning:\t%s\n", deploymentConfig.Status.Details.Message)
		}

		deploymentName := deployutil.LatestDeploymentNameForConfig(deploymentConfig)
		deployment, err := d.kubeClient.ReplicationControllers(namespace).Get(deploymentName)
		if err != nil {
			if kerrors.IsNotFound(err) {
				formatString(out, "Latest Deployment", "<none>")
			} else {
				formatString(out, "Latest Deployment", fmt.Sprintf("error: %v", err))
			}
		} else {
			header := fmt.Sprintf("Deployment #%d (latest)", deployutil.DeploymentVersionFor(deployment))
			printDeploymentRc(deployment, d.kubeClient, out, header, true)
		}

		// We don't show the deployment history when running `oc rollback --dry-run`.
		if d.config == nil {
			deploymentsHistory, err := d.kubeClient.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: labels.Everything()})
			if err == nil {
				sorted := deploymentsHistory.Items
				sort.Sort(sort.Reverse(rcutils.OverlappingControllers(sorted)))
				counter := 1
				for _, item := range sorted {
					if item.Name != deploymentName && deploymentConfig.Name == deployutil.DeploymentConfigNameFor(&item) {
						header := fmt.Sprintf("Deployment #%d", deployutil.DeploymentVersionFor(&item))
						printDeploymentRc(&item, d.kubeClient, out, header, false)
						counter++
					}
					if counter == maxDisplayDeployments {
						break
					}
				}
			}
		}

		if settings.ShowEvents {
			// Events
			if events, err := d.kubeClient.Events(deploymentConfig.Namespace).Search(deploymentConfig); err == nil && events != nil {
				latestDeploymentEvents := &kapi.EventList{Items: []kapi.Event{}}
				for i := len(events.Items); i != 0 && i > len(events.Items)-maxDisplayDeploymentsEvents; i-- {
					latestDeploymentEvents.Items = append(latestDeploymentEvents.Items, events.Items[i-1])
				}
				fmt.Fprintln(out)
				kctl.DescribeEvents(latestDeploymentEvents, out)
			}
		}

		return nil
	})
}
func TestTriggers_configChange(t *testing.T) {
	testutil.DeleteAllEtcdKeys()
	openshift := NewTestDeployOpenshift(t)
	defer openshift.Close()

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = testutil.Namespace()
	config.Triggers[0] = deploytest.OkConfigChangeTrigger()
	var err error

	watch, err := openshift.KubeClient.ReplicationControllers(testutil.Namespace()).Watch(labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments %v", err)
	}
	defer watch.Stop()

	// submit the initial deployment config
	if _, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Create(config); err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v", err)
	}

	// verify the initial deployment exists
	event := <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}

	assertEnvVarEquals("ENV1", "VAL1", deployment, t)

	// submit a new config with an updated environment variable
	if config, err = openshift.Client.DeploymentConfigs(testutil.Namespace()).Generate(config.Name); err != nil {
		t.Fatalf("Error generating config: %v", err)
	}
	config.Template.ControllerTemplate.Template.Spec.Containers[0].Env[0].Value = "UPDATED"

	// before we update the config, we need to update the state of the existing deployment
	// this is required to be done manually since the deployment and deployer pod controllers are not run in this test
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusComplete)

	// update the deployment
	if _, err = openshift.KubeClient.ReplicationControllers(testutil.Namespace()).Update(deployment); err != nil {
		t.Fatalf("Error updating existing deployment: %v", err)
	}

	event = <-watch.ResultChan()
	if e, a := watchapi.Modified, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}

	if _, err := openshift.Client.DeploymentConfigs(testutil.Namespace()).Update(config); err != nil {
		t.Fatalf("Couldn't create updated DeploymentConfig: %v", err)
	}

	event = <-watch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	newDeployment := event.Object.(*kapi.ReplicationController)

	assertEnvVarEquals("ENV1", "UPDATED", newDeployment, t)

	if newDeployment.Name == deployment.Name {
		t.Fatalf("expected new deployment; old=%s, new=%s", deployment.Name, newDeployment.Name)
	}
}
func TestTriggers_manual(t *testing.T) {
	const namespace = "test-triggers-manual"
	testutil.RequireEtcd(t)
	defer testutil.DumpEtcdOnFailure(t)
	_, clusterAdminKubeConfig, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatal(err)
	}
	_, err = testserver.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, namespace, "my-test-user")
	if err != nil {
		t.Fatal(err)
	}
	osClient, kubeClient, _, err := testutil.GetClientForUser(*clusterAdminClientConfig, "my-test-user")
	if err != nil {
		t.Fatal(err)
	}

	config := deploytest.OkDeploymentConfig(0)
	config.Namespace = namespace
	config.Spec.Triggers = []deployapi.DeploymentTriggerPolicy{{Type: deployapi.DeploymentTriggerManual}}

	dc, err := osClient.DeploymentConfigs(namespace).Create(config)
	if err != nil {
		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
	}

	rcWatch, err := kubeClient.ReplicationControllers(namespace).Watch(kapi.ListOptions{ResourceVersion: dc.ResourceVersion})
	if err != nil {
		t.Fatalf("Couldn't subscribe to Deployments: %v", err)
	}
	defer rcWatch.Stop()

	request := &deployapi.DeploymentRequest{
		Name:   config.Name,
		Latest: false,
		Force:  true,
	}

	config, err = osClient.DeploymentConfigs(namespace).Instantiate(request)
	if err != nil {
		t.Fatalf("Couldn't instantiate deployment config %q: %v", config.Name, err)
	}
	if config.Status.LatestVersion != 1 {
		t.Fatal("Instantiated deployment config should have version 1")
	}
	if config.Status.Details == nil || len(config.Status.Details.Causes) == 0 {
		t.Fatal("Instantiated deployment config should have a cause of deployment")
	}
	gotType := config.Status.Details.Causes[0].Type
	if gotType != deployapi.DeploymentTriggerManual {
		t.Fatalf("Instantiated deployment config should have a %q cause of deployment instead of %q", deployapi.DeploymentTriggerManual, gotType)
	}

	event := <-rcWatch.ResultChan()
	if e, a := watchapi.Added, event.Type; e != a {
		t.Fatalf("expected watch event type %s, got %s", e, a)
	}
	deployment := event.Object.(*kapi.ReplicationController)

	if e, a := config.Name, deployutil.DeploymentConfigNameFor(deployment); e != a {
		t.Fatalf("Expected deployment annotated with deploymentConfig '%s', got '%s'", e, a)
	}
	if e, a := int64(1), deployutil.DeploymentVersionFor(deployment); e != a {
		t.Fatalf("Deployment annotation version does not match: %#v", deployment)
	}
}