func testScalePausedDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	c := adapter.FromUnversionedClient(f.Client)

	podLabels := map[string]string{"name": nginxImageName}
	replicas := int32(3)

	// Create a nginx deployment.
	deploymentName := "nginx-deployment"
	d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
	framework.Logf("Creating deployment %q", deploymentName)
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	rs, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())

	// Pause the deployment and try to scale it.
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Paused = true
	})
	Expect(err).NotTo(HaveOccurred())

	// Scale the paused deployment.
	framework.Logf("Scaling up the paused deployment %q", deploymentName)
	newReplicas := int32(5)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
		update.Spec.Replicas = newReplicas
	})
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	rs, err = deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())

	if rs.Spec.Replicas != newReplicas {
		err = fmt.Errorf("Expected %d replicas for the new replica set, got %d", newReplicas, rs.Spec.Replicas)
		Expect(err).NotTo(HaveOccurred())
	}
}
func testNewDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expect an unversioned#Client.
	c := adapter.FromUnversionedClient(f.Client)

	deploymentName := "test-new-deployment"
	podLabels := map[string]string{"name": nginxImageName}
	replicas := int32(1)
	framework.Logf("Creating simple deployment %s", deploymentName)
	d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", annotations.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	// Wait for it to be updated to revision 1.
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", nginxImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())

	// Check new RS annotations.
	Expect(newRS.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
	Expect(newRS.Annotations[annotations.LastAppliedConfigAnnotation]).Should(Equal(""))
	Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-replica-set"))
	Expect(deployment.Annotations[annotations.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-replica-set"))
}
func testRecreateDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expect an unversioned#Client.
	unversionedClient := f.Client
	c := adapter.FromUnversionedClient(unversionedClient)

	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
	rsPodLabels := map[string]string{
		"name": "sample-pod-3",
		"pod":  nginxImageName,
	}

	rsName := "test-recreate-controller"
	replicas := int32(3)
	_, err := c.Extensions().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, nginxImageName, nginxImage))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = framework.VerifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
	if err != nil {
		framework.Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "test-recreate-deployment"
	framework.Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, redisImageName, redisImage, extensions.RecreateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	// Wait for it to be updated to revision 1.
	err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", redisImage)
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the pods were scaled up and down as expected. We use events to verify that.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	framework.WaitForEvents(unversionedClient, ns, deployment, 2)
	events, err := c.Core().Events(ns).Search(deployment)
	if err != nil {
		framework.Logf("error in listing events: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// There should be 2 events: with the Recreate strategy, the old ReplicaSet is first
	// scaled down to 0, and then the new ReplicaSet is scaled up to 3.
	Expect(len(events.Items)).Should(Equal(2))
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS).NotTo(Equal(nil))
	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down replica set %s to 0", rsName)))
	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up replica set %s to 3", newRS.Name)))
}
// checkDeploymentRevision checks if the input deployment's and its new replica set's revision and images are as expected.
func checkDeploymentRevision(c *clientset.Clientset, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *extensions.ReplicaSet) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Check revision of the new replica set of this deployment.
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRS.Annotations).NotTo(Equal(nil))
	Expect(newRS.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	// Check revision of this deployment.
	Expect(deployment.Annotations).NotTo(Equal(nil))
	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	if len(imageName) > 0 {
		// Check the image the new replica set creates.
		Expect(newRS.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(newRS.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
		// Check the image the deployment creates.
		Expect(deployment.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
		Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
	}
	return deployment, newRS
}
func testPausedDeployment(f *framework.Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPod still expect an unversioned#Client.
	unversionedClient := f.Client
	c := adapter.FromUnversionedClient(unversionedClient)
	deploymentName := "test-paused-deployment"
	podLabels := map[string]string{"name": nginxImageName}
	d := newDeployment(deploymentName, 1, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Spec.Paused = true
	tgps := int64(20)
	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &tgps
	framework.Logf("Creating paused deployment %s", deploymentName)
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer stopDeployment(c, f.Client, ns, deploymentName)

	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Verify that there is no latest state realized for the new deployment.
	rs, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if rs != nil {
		err = fmt.Errorf("unexpected new rs/%s for deployment/%s", rs.Name, deployment.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Update the deployment to run.
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Paused = false
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the resume.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
	if err != nil {
		Expect(err).NotTo(HaveOccurred())
	}
	opts := api.ListOptions{LabelSelector: selector}
	w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
	Expect(err).NotTo(HaveOccurred())

	select {
	case <-w.ResultChan():
		// this is it
	case <-time.After(time.Minute):
		err = fmt.Errorf("expected a new replica set to be created")
		Expect(err).NotTo(HaveOccurred())
	}

	// Pause the deployment and delete the replica set.
	// The paused deployment shouldn't recreate a new one.
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Paused = true
	})
	Expect(err).NotTo(HaveOccurred())

	// Use observedGeneration to determine if the controller noticed the pause.
	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	// Update the deployment template - since the deployment is paused, no new replica
	// set should be created for the updated template.
	framework.Logf("Updating paused deployment %q", deploymentName)
	newTGPS := int64(40)
	deployment, err = framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
		update.Spec.Template.Spec.TerminationGracePeriodSeconds = &newTGPS
	})
	Expect(err).NotTo(HaveOccurred())

	err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	Expect(err).NotTo(HaveOccurred())

	framework.Logf("Looking for new replicaset for paused deployment %q (there should be none)", deploymentName)
	newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if newRS != nil {
		err = fmt.Errorf("No replica set should match the deployment template but there is %q", newRS.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	_, allOldRs, err := deploymentutil.GetOldReplicaSets(deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if len(allOldRs) != 1 {
		err = fmt.Errorf("expected an old replica set")
		Expect(err).NotTo(HaveOccurred())
	}

	framework.Logf("Comparing deployment diff with old replica set %q", allOldRs[0].Name)
	if *allOldRs[0].Spec.Template.Spec.TerminationGracePeriodSeconds == newTGPS {
		err = fmt.Errorf("TerminationGracePeriodSeconds on the replica set should be %d but is %d", tgps, newTGPS)
		Expect(err).NotTo(HaveOccurred())
	}
}