func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())

	config := HollowNodeConfig{}
	config.addFlags(pflag.CommandLine)
	util.InitFlags()

	if !knownMorphs.Has(config.Morph) {
		glog.Fatalf("Unknown morph: %v. Allowed values: %v", config.Morph, knownMorphs.List())
	}

	// Create a client to communicate with the API server.
	cl, err := createClientFromFile(config.KubeconfigPath)
	if err != nil {
		glog.Fatalf("Failed to create a Client: %v. Exiting.", err)
	}
	clientset := clientset.FromUnversionedClient(cl)

	if config.Morph == "kubelet" {
		cadvisorInterface := new(cadvisor.Fake)
		containerManager := cm.NewStubContainerManager()

		fakeDockerClient := dockertools.NewFakeDockerClient()
		fakeDockerClient.VersionInfo = docker.Env{"Version=1.1.3", "ApiVersion=1.18"}
		fakeDockerClient.EnableSleep = true

		hollowKubelet := kubemark.NewHollowKubelet(
			config.NodeName,
			clientset,
			cadvisorInterface,
			fakeDockerClient,
			config.KubeletPort,
			config.KubeletReadOnlyPort,
			containerManager,
		)
		hollowKubelet.Run()
	}

	if config.Morph == "proxy" {
		eventBroadcaster := record.NewBroadcaster()
		recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "kube-proxy", Host: config.NodeName})

		iptInterface := fakeiptables.NewFake()

		serviceConfig := proxyconfig.NewServiceConfig()
		serviceConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		endpointsConfig := proxyconfig.NewEndpointsConfig()
		endpointsConfig.RegisterHandler(&kubemark.FakeProxyHandler{})

		hollowProxy := kubemark.NewHollowProxyOrDie(config.NodeName, cl, endpointsConfig, serviceConfig, iptInterface, eventBroadcaster, recorder)
		hollowProxy.Run()
	}
}
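// For reference, a minimal sketch of how HollowNodeConfig.addFlags could bind
// the fields used above to pflag. The flag names and defaults here are
// assumptions for illustration, not the canonical kubemark definitions.
func (c *HollowNodeConfig) addFlags(fs *pflag.FlagSet) {
	fs.StringVar(&c.KubeconfigPath, "kubeconfig", "/kubeconfig/kubeconfig", "Path to kubeconfig file.")
	fs.IntVar(&c.KubeletPort, "kubelet-port", 10250, "Port on which HollowKubelet should be listening.")
	fs.IntVar(&c.KubeletReadOnlyPort, "kubelet-read-only-port", 10255, "Read-only port on which HollowKubelet should be listening.")
	fs.StringVar(&c.NodeName, "name", "fake-node", "Name of this Hollow Node.")
	fs.StringVar(&c.Morph, "morph", "", fmt.Sprintf("Specifies into which Hollow component this binary should morph. Allowed values: %v", knownMorphs.List()))
}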
// beforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() {
	// The fact that we need this feels like a bug in ginkgo.
	// https://github.com/onsi/ginkgo/issues/222
	f.cleanupHandle = AddCleanupAction(f.afterEach)

	By("Creating a kubernetes client")
	config, err := loadConfig()
	Expect(err).NotTo(HaveOccurred())
	config.QPS = f.options.clientQPS
	config.Burst = f.options.clientBurst
	c, err := loadClientFromConfig(config)
	Expect(err).NotTo(HaveOccurred())
	f.Client = c
	f.Clientset_1_2 = release_1_2.FromUnversionedClient(c)

	By("Building a namespace api object")
	namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
		"e2e-framework": f.BaseName,
	})
	Expect(err).NotTo(HaveOccurred())
	f.Namespace = namespace

	if testContext.VerifyServiceAccount {
		By("Waiting for a default service account to be provisioned in namespace")
		err = waitForDefaultServiceAccountInNamespace(c, namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	} else {
		Logf("Skipping waiting for service account")
	}

	if testContext.GatherKubeSystemResourceUsageData {
		f.gatherer, err = NewResourceUsageGatherer(c)
		if err != nil {
			Logf("Error while creating NewResourceUsageGatherer: %v", err)
		} else {
			go f.gatherer.startGatheringData()
		}
	}

	if testContext.GatherLogsSizes {
		f.logsSizeWaitGroup = sync.WaitGroup{}
		f.logsSizeWaitGroup.Add(1)
		f.logsSizeCloseChannel = make(chan bool)
		f.logsSizeVerifier = NewLogsVerifier(c, f.logsSizeCloseChannel)
		go func() {
			f.logsSizeVerifier.Run()
			f.logsSizeWaitGroup.Done()
		}()
	}
}
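// Hypothetical usage of the framework above: NewFramework registers
// beforeEach/afterEach with ginkgo, so each spec gets a fresh client and
// namespace. The "example" suite and the pod listing are illustrative only.
var _ = Describe("Framework example", func() {
	f := NewFramework("example")

	It("runs against a freshly created namespace", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(api.ListOptions{})
		Expect(err).NotTo(HaveOccurred())
		Logf("found %d pods in namespace %s", len(pods.Items), f.Namespace.Name)
	})
})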
func testRollingUpdateDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPods still expect an unversioned client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rcPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  "nginx",
	}

	rcName := "nginx-controller"
	replicas := 3
	_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("deleting replication controller %s", rcName)
		Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
	}()
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod", false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()

	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, replicas-1, replicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 1 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
}
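// An assumed reconstruction of the newRC helper used throughout these tests:
// it builds a ReplicationController whose single container runs the given
// image. The field layout is a sketch inferred from the call sites above, not
// the verified upstream definition.
func newRC(rcName string, replicas int, rcPodLabels map[string]string, imageName, image string) *api.ReplicationController {
	return &api.ReplicationController{
		ObjectMeta: api.ObjectMeta{Name: rcName},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: rcPodLabels,
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: rcPodLabels},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: imageName, Image: image},
					},
				},
			},
		},
	}
}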
func testNewDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPods still expect an unversioned client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(f.Client)
	deploymentName := "nginx-deployment"
	podLabels := map[string]string{"name": "nginx"}
	replicas := 1
	Logf("Creating simple deployment %s", deploymentName)
	d := newDeployment(deploymentName, replicas, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Annotations = map[string]string{"test": "should-copy-to-RC", kubectl.LastAppliedConfigAnnotation: "should-not-copy-to-RC"}
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Core().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()
	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "nginx", false, replicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// DeploymentStatus should be appropriately updated.
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	Expect(deployment.Status.Replicas).Should(Equal(replicas))
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(replicas))

	// Check if it's updated to revision 1 correctly.
	_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", "nginx", "nginx")

	// Check other annotations.
	Expect(newRC.Annotations["test"]).Should(Equal("should-copy-to-RC"))
	Expect(newRC.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal(""))
	Expect(deployment.Annotations["test"]).Should(Equal("should-copy-to-RC"))
	Expect(deployment.Annotations[kubectl.LastAppliedConfigAnnotation]).Should(Equal("should-not-copy-to-RC"))
}
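// Similarly, an assumed reconstruction of the newDeployment helper: a
// Deployment with one container, the given update strategy, and an optional
// revision history limit. This mirrors how the tests call it; the exact
// upstream definition may differ.
func newDeployment(deploymentName string, replicas int, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int) *extensions.Deployment {
	return &extensions.Deployment{
		ObjectMeta: api.ObjectMeta{Name: deploymentName},
		Spec: extensions.DeploymentSpec{
			Replicas:             replicas,
			Selector:             podLabels,
			Strategy:             extensions.DeploymentStrategy{Type: strategyType},
			RevisionHistoryLimit: revisionHistoryLimit,
			Template: api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: podLabels},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{Name: imageName, Image: image},
					},
				},
			},
		},
	}
}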
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy.
func testDeploymentCleanUpPolicy(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rcPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  "nginx",
	}
	rcName := "nginx-controller"
	replicas := 1
	revisionHistoryLimit := new(int)
	*revisionHistoryLimit = 0
	_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "cleanup-pod", false, 1)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RollingUpdateDeploymentStrategyType, revisionHistoryLimit))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()

	err = waitForDeploymentOldRCsNum(c, ns, deploymentName, *revisionHistoryLimit)
	Expect(err).NotTo(HaveOccurred())
}
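// A plausible shape for the waitForDeploymentOldRCsNum helper used above,
// written as a sketch: it polls until the deployment's old-RC count drops to
// the desired number (here, the revision history limit). The name is suffixed
// "Sketch" because the poll interval, timeout, and exact logic are assumptions.
func waitForDeploymentOldRCsNumSketch(c clientset.Interface, ns, deploymentName string, desiredRCNum int) error {
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
		if err != nil {
			return false, err
		}
		return len(oldRCs) == desiredRCNum, nil
	})
}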
// beforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() {
	By("Creating a kubernetes client")
	c, err := loadClient()
	Expect(err).NotTo(HaveOccurred())
	f.Client = c
	f.Clientset_1_2 = release_1_2.FromUnversionedClient(c)

	By("Building a namespace api object")
	namespace, err := f.nsCreateFunc(f.BaseName, f.Client, map[string]string{
		"e2e-framework": f.BaseName,
	})
	Expect(err).NotTo(HaveOccurred())
	f.Namespace = namespace

	if testContext.VerifyServiceAccount {
		By("Waiting for a default service account to be provisioned in namespace")
		err = waitForDefaultServiceAccountInNamespace(c, namespace.Name)
		Expect(err).NotTo(HaveOccurred())
	} else {
		Logf("Skipping waiting for service account")
	}

	if testContext.GatherKubeSystemResourceUsageData {
		f.gatherer.startGatheringData(c, resourceDataGatheringPeriodSeconds*time.Second)
	}

	if testContext.GatherLogsSizes {
		f.logsSizeWaitGroup = sync.WaitGroup{}
		f.logsSizeWaitGroup.Add(1)
		f.logsSizeCloseChannel = make(chan bool)
		f.logsSizeVerifier = NewLogsVerifier(c, f.logsSizeCloseChannel)
		go func() {
			f.logsSizeVerifier.Run()
			f.logsSizeWaitGroup.Done()
		}()
	}
}
// testRollbackDeploymentRCNoRevision tests that deployment supports rollback even when there's an old RC without revision.
// An old RC without a revision annotation is created, and then a deployment is created (v1). The deployment shouldn't add
// a revision annotation to the old RC. Rolling back the deployment to the last revision should then fail and emit a
// related event. Updating the deployment to v2 and rolling it back to v1 should succeed and emit a related event; the
// deployment then becomes v3. Rolling back the deployment to v10 (which doesn't exist in history) should fail and emit a
// related event. Finally, rolling back the deployment (v3) to v3 should be a no-op and emit a related event.
func testRollbackDeploymentRCNoRevision(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(f.Client)
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}
	rcPodLabels := map[string]string{
		"name": podName,
		"pod":  "nginx",
	}

	rcName := "nginx-controller"
	rcReplicas := 0
	rc := newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx")
	rc.Annotations = make(map[string]string)
	rc.Annotations["make"] = "difference"
	_, err := c.Legacy().ReplicationControllers(ns).Create(rc)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("deleting replication controller %s", rcName)
		Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
	}()

	// Create a deployment to create nginx pods, which have a different template than the rc created above.
	deploymentName, deploymentImageName := "nginx-deployment", "nginx"
	deploymentReplicas := 1
	deploymentImage := "nginx"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	Logf("Creating deployment %s", deploymentName)
	d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
	_, err = c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
		oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		for _, oldRC := range oldRCs {
			Expect(c.Legacy().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
		}
	}()

	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// DeploymentStatus should be appropriately updated.
	Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(deploymentReplicas))

	// Check if it's updated to revision 1 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	// Check that the rc we created still doesn't contain revision information.
	rc, err = c.Legacy().ReplicationControllers(ns).Get(rcName)
	Expect(err).NotTo(HaveOccurred())
	Expect(rc.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(""))

	// Update the deploymentRollback to rollback to the last revision.
	// Since there's only 1 revision in history, it should stay as revision 1.
	revision := int64(0)
	Logf("rolling back deployment %s to last revision", deploymentName)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// There should be a revision-not-found event, since there's no last revision.
	waitForEvents(unversionedClient, ns, deployment, 2)
	events, err := c.Legacy().Events(ns).Search(deployment)
	Expect(err).NotTo(HaveOccurred())
	Expect(events.Items[1].Reason).Should(Equal(deploymentutil.RollbackRevisionNotFound))

	// Check if it's still revision 1.
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	// Update the deployment to create redis pods.
	updatedDeploymentImage := "redis"
	updatedDeploymentImageName := "redis"
	d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
	d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	Logf("updating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Update(d)
	Expect(err).NotTo(HaveOccurred())

	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 2 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "2", updatedDeploymentImageName, updatedDeploymentImage)

	// Update the deploymentRollback to rollback to revision 1.
	revision = 1
	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	// There should be a rollback event after we roll back to revision 1.
	waitForEvents(unversionedClient, ns, deployment, 5)
	events, err = c.Legacy().Events(ns).Search(deployment)
	Expect(err).NotTo(HaveOccurred())
	Expect(events.Items[4].Reason).Should(Equal(deploymentutil.RollbackDone))

	// Check if it's updated to revision 3 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)

	// Update the deploymentRollback to rollback to revision 10.
	// Since there's no revision 10 in history, it should stay as revision 3, and emit an event.
	revision = 10
	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// There should be a revision-not-found event, since there's no revision 10.
	waitForEvents(unversionedClient, ns, deployment, 7)
	events, err = c.Legacy().Events(ns).Search(deployment)
	Expect(err).NotTo(HaveOccurred())
	Expect(events.Items[6].Reason).Should(Equal(deploymentutil.RollbackRevisionNotFound))

	// Check if it's still revision 3.
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)

	// Update the deploymentRollback to rollback to revision 3.
	// Since it's already revision 3, it should be a no-op and emit an event.
	revision = 3
	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	// There should be a template-unchanged event, since it's already at revision 3.
	waitForEvents(unversionedClient, ns, deployment, 8)
	events, err = c.Legacy().Events(ns).Search(deployment)
	Expect(err).NotTo(HaveOccurred())
	Expect(events.Items[7].Reason).Should(Equal(deploymentutil.RollbackTemplateUnchanged))

	// Check if it's still revision 3.
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)
}
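// The newDeploymentRollback helper, reconstructed under the assumption that
// it simply wraps the target revision in an extensions.DeploymentRollback
// object (revision 0 means "roll back to the last revision").
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
	return &extensions.DeploymentRollback{
		Name:               name,
		UpdatedAnnotations: annotations,
		RollbackTo:         extensions.RollbackConfig{Revision: revision},
	}
}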
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2), that
// rolling back to revision 1 updates the template back to revision 1's and creates revision 3, and that
// rolling back to the last revision then creates revision 4.
func testRollbackDeployment(f *Framework) {
	ns := f.Namespace.Name
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	podName := "nginx"
	deploymentPodLabels := map[string]string{"name": podName}

	// Create a deployment to create nginx pods.
	deploymentName, deploymentImageName := "nginx-deployment", "nginx"
	deploymentReplicas := 1
	deploymentImage := "nginx"
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	Logf("Creating deployment %s", deploymentName)
	d := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
		oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		for _, oldRC := range oldRCs {
			Expect(c.Legacy().ReplicationControllers(ns).Delete(oldRC.Name, nil)).NotTo(HaveOccurred())
		}
	}()

	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "nginx", false, deploymentReplicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// DeploymentStatus should be appropriately updated.
	Expect(deployment.Status.Replicas).Should(Equal(deploymentReplicas))
	Expect(deployment.Status.UpdatedReplicas).Should(Equal(deploymentReplicas))

	// Check if it's updated to revision 1 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	// Update the deployment to create redis pods.
	updatedDeploymentImage := "redis"
	updatedDeploymentImageName := "redis"
	d.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
	d.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	Logf("updating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Update(d)
	Expect(err).NotTo(HaveOccurred())

	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 2 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "2", updatedDeploymentImageName, updatedDeploymentImage)

	// Update the deploymentRollback to rollback to revision 1.
	revision := int64(1)
	Logf("rolling back deployment %s to revision %d", deploymentName, revision)
	rollback := newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 3 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "3", deploymentImageName, deploymentImage)

	// Update the deploymentRollback to rollback to the last revision.
	revision = 0
	Logf("rolling back deployment %s to last revision", deploymentName)
	rollback = newDeploymentRollback(deploymentName, nil, revision)
	err = c.Extensions().Deployments(ns).Rollback(rollback)
	Expect(err).NotTo(HaveOccurred())

	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, 0)
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 4 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "4", updatedDeploymentImageName, updatedDeploymentImage)
}
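// A rough sketch of what waitForDeploymentStatus is assumed to verify, based
// on how it's called above: poll the deployment plus its old and new RCs until
// the rollout settles at the desired replica count, while the total created
// pods never exceed maxCreated. The "Sketch" suffix, helper names, poll
// interval, and timeout are assumptions, not the verified upstream code.
func waitForDeploymentStatusSketch(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error {
	return wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		if err != nil {
			return false, err
		}
		oldRCs, _, err := deploymentutil.GetOldRCs(*deployment, c)
		if err != nil {
			return false, err
		}
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		if err != nil || newRC == nil {
			return false, err
		}
		totalCreated := deploymentutil.GetReplicaCountForRCs(append(oldRCs, newRC))
		if totalCreated > maxCreated {
			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
		}
		// The rollout is done once the new RC owns all desired replicas and the old RCs are scaled to zero.
		return deploymentutil.GetReplicaCountForRCs(oldRCs) == 0 && newRC.Spec.Replicas == desiredUpdatedReplicas, nil
	})
}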
func testPausedDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPods still expect an unversioned client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	deploymentName := "nginx"
	podLabels := map[string]string{"name": "nginx"}
	d := newDeployment(deploymentName, 1, podLabels, "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType, nil)
	d.Spec.Paused = true
	Logf("Creating paused deployment %s", deploymentName)
	_, err := c.Extensions().Deployments(ns).Create(d)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		_, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
	}()
	// Check that deployment is created fine.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())

	// Verify that there is no latest state realized for the new deployment.
	rc, err := deploymentutil.GetNewRC(*deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if rc != nil {
		err = fmt.Errorf("unexpected new rc/%s for deployment/%s", rc.Name, deployment.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Update the deployment to run.
	deployment.Spec.Paused = false
	deployment, err = c.Extensions().Deployments(ns).Update(deployment)
	Expect(err).NotTo(HaveOccurred())

	opts := api.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector()}
	w, err := c.Legacy().ReplicationControllers(ns).Watch(opts)
	Expect(err).NotTo(HaveOccurred())

	select {
	case <-w.ResultChan():
		// this is it
	case <-time.After(time.Minute):
		err = fmt.Errorf("expected a new rc to be created")
		Expect(err).NotTo(HaveOccurred())
	}

	// Pause the deployment and delete the replication controller.
	// The paused deployment shouldn't recreate a new one.
	deployment.Spec.Paused = true
	deployment.ResourceVersion = ""
	deployment, err = c.Extensions().Deployments(ns).Update(deployment)
	Expect(err).NotTo(HaveOccurred())

	newRC, err := deploymentutil.GetNewRC(*deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())

	deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	if !deployment.Spec.Paused {
		err = fmt.Errorf("deployment %q should be paused", deployment.Name)
		Expect(err).NotTo(HaveOccurred())
	}
	shouldBeNil, err := deploymentutil.GetNewRC(*deployment, c)
	Expect(err).NotTo(HaveOccurred())
	if shouldBeNil != nil {
		err = fmt.Errorf("deployment %q shouldn't have a rc but there is %q", deployment.Name, shouldBeNil.Name)
		Expect(err).NotTo(HaveOccurred())
	}
}
// testRolloverDeployment tests that deployment supports rollover,
// i.e. we can change desired state and kick off a rolling update, then change desired state again before it finishes.
func testRolloverDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPods still expect an unversioned client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	podName := "rollover-pod"
	deploymentPodLabels := map[string]string{"name": podName}
	rcPodLabels := map[string]string{
		"name": podName,
		"pod":  "nginx",
	}

	rcName := "nginx-controller"
	rcReplicas := 4
	_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, rcReplicas, rcPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("deleting replication controller %s", rcName)
		Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
	}()
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, podName, false, rcReplicas)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
	deploymentName, deploymentImageName := "redis-deployment", "redis-slave"
	deploymentReplicas := 4
	deploymentImage := "gcr.io/google_samples/gb-redisslave:v1"
	deploymentMinReadySeconds := 5
	deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
	Logf("Creating deployment %s", deploymentName)
	newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
	newDeployment.Spec.MinReadySeconds = deploymentMinReadySeconds
	newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
		MaxUnavailable: intstr.FromInt(1),
		MaxSurge:       intstr.FromInt(1),
	}
	_, err = c.Extensions().Deployments(ns).Create(newDeployment)
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()

	// Verify that the pods were scaled up and down as expected. We use events to verify that.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	// Make sure the deployment starts to scale up and down RCs.
	waitForPartialEvents(unversionedClient, ns, deployment, 2)
	// Check if it's updated to revision 1 correctly.
	_, newRC := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)

	// Before the deployment finishes, update the deployment to rollover the above 2 rcs and bring up redis pods.
	// If the deployment already finished here, the test would fail. When this happens, increase its minReadySeconds or replicas to prevent it.
	Expect(newRC.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
	updatedDeploymentImage := "redis"
	newDeployment.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImage
	newDeployment.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	Logf("updating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Update(newDeployment)
	Expect(err).NotTo(HaveOccurred())

	err = waitForDeploymentStatus(c, ns, deploymentName, deploymentReplicas, deploymentReplicas-1, deploymentReplicas+1, deploymentMinReadySeconds)
	Expect(err).NotTo(HaveOccurred())

	// Check if it's updated to revision 2 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "2", updatedDeploymentImage, updatedDeploymentImage)
}
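// An assumed reconstruction of checkDeploymentRevision, inferred from the call
// sites above: fetch the deployment and its new RC, then assert that both
// carry the expected revision annotation and that the new RC's pod template
// matches the expected container name and image. Return values follow how the
// tests destructure the result; the actual upstream helper may differ.
func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision, imageName, image string) (*extensions.Deployment, *api.ReplicationController) {
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	newRC, err := deploymentutil.GetNewRC(*deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRC.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).Should(Equal(revision))
	Expect(newRC.Spec.Template.Spec.Containers[0].Name).Should(Equal(imageName))
	Expect(newRC.Spec.Template.Spec.Containers[0].Image).Should(Equal(image))
	return deployment, newRC
}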
func testRecreateDeployment(f *Framework) {
	ns := f.Namespace.Name
	// TODO: remove unversionedClient when the refactoring is done. Currently some
	// functions like verifyPods still expect an unversioned client.
	unversionedClient := f.Client
	c := clientset.FromUnversionedClient(unversionedClient)
	// Create nginx pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod-3"}
	rcPodLabels := map[string]string{
		"name": "sample-pod-3",
		"pod":  "nginx",
	}

	rcName := "nginx-controller"
	replicas := 3
	_, err := c.Legacy().ReplicationControllers(ns).Create(newRC(rcName, replicas, rcPodLabels, "nginx", "nginx"))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		Logf("deleting replication controller %s", rcName)
		Expect(c.Legacy().ReplicationControllers(ns).Delete(rcName, nil)).NotTo(HaveOccurred())
	}()
	// Verify that the required pods have come up.
	err = verifyPods(unversionedClient, ns, "sample-pod-3", false, 3)
	if err != nil {
		Logf("error in waiting for pods to come up: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}

	// Create a deployment to delete nginx pods and instead bring up redis pods.
	deploymentName := "redis-deployment-3"
	Logf("Creating deployment %s", deploymentName)
	_, err = c.Extensions().Deployments(ns).Create(newDeployment(deploymentName, replicas, deploymentPodLabels, "redis", "redis", extensions.RecreateDeploymentStrategyType, nil))
	Expect(err).NotTo(HaveOccurred())
	defer func() {
		deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
		Expect(err).NotTo(HaveOccurred())
		Logf("deleting deployment %s", deploymentName)
		Expect(c.Extensions().Deployments(ns).Delete(deploymentName, nil)).NotTo(HaveOccurred())
		// TODO: remove this once we can delete rcs with deployment
		newRC, err := deploymentutil.GetNewRC(*deployment, c)
		Expect(err).NotTo(HaveOccurred())
		Expect(c.Legacy().ReplicationControllers(ns).Delete(newRC.Name, nil)).NotTo(HaveOccurred())
	}()

	err = waitForDeploymentStatus(c, ns, deploymentName, replicas, 0, replicas, 0)
	if err != nil {
		deployment, _ := c.Extensions().Deployments(ns).Get(deploymentName)
		Logf("deployment = %+v", deployment)
	}
	Expect(err).NotTo(HaveOccurred())

	// Verify that the pods were scaled up and down as expected. We use events to verify that.
	deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
	Expect(err).NotTo(HaveOccurred())
	waitForEvents(unversionedClient, ns, deployment, 2)
	events, err := c.Legacy().Events(ns).Search(deployment)
	if err != nil {
		Logf("error in listing events: %s", err)
		Expect(err).NotTo(HaveOccurred())
	}
	// There should be 2 events: one to scale down the old RC to 0, then one to scale up the new RC to 3.
	Expect(len(events.Items)).Should(Equal(2))
	newRC, err := deploymentutil.GetNewRC(*deployment, c)
	Expect(err).NotTo(HaveOccurred())
	Expect(newRC).NotTo(BeNil())
	Expect(events.Items[0].Message).Should(Equal(fmt.Sprintf("Scaled down rc %s to 0", rcName)))
	Expect(events.Items[1].Message).Should(Equal(fmt.Sprintf("Scaled up rc %s to 3", newRC.Name)))

	// Check if it's updated to revision 1 correctly.
	checkDeploymentRevision(c, ns, deploymentName, "1", "redis", "redis")
}
// NewFactory creates a factory with the default Kubernetes resources defined.
// If optionalClientConfig is nil, then flags will be bound to a new clientcmd.ClientConfig.
// If optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
	mapper := kubectl.ShortcutExpander{RESTMapper: api.RESTMapper}

	flags := pflag.NewFlagSet("", pflag.ContinueOnError)
	flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

	clientConfig := optionalClientConfig
	if optionalClientConfig == nil {
		clientConfig = DefaultClientConfig(flags)
	}

	clients := NewClientCache(clientConfig)

	return &Factory{
		clients: clients,
		flags:   flags,
		cmd:     recordCommand(os.Args),

		Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
			cfg, err := clientConfig.ClientConfig()
			CheckErr(err)
			cmdApiVersion := unversioned.GroupVersion{}
			if cfg.GroupVersion != nil {
				cmdApiVersion = *cfg.GroupVersion
			}
			return kubectl.OutputVersionMapper{RESTMapper: mapper, OutputVersions: []unversioned.GroupVersion{cmdApiVersion}}, api.Scheme
		},
		Client: func() (*client.Client, error) {
			return clients.ClientForVersion(nil)
		},
		ClientConfig: func() (*client.Config, error) {
			return clients.ClientConfigForVersion(nil)
		},
		ClientForMapping: func(mapping *meta.RESTMapping) (resource.RESTClient, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			switch mapping.GroupVersionKind.Group {
			case api.GroupName:
				return client.RESTClient, nil
			case extensions.GroupName:
				return client.ExtensionsClient.RESTClient, nil
			}
			return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource)
		},
		Describer: func(mapping *meta.RESTMapping) (kubectl.Describer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			if describer, ok := kubectl.DescriberFor(mapping.GroupVersionKind.GroupKind(), client); ok {
				return describer, nil
			}
			return nil, fmt.Errorf("no description has been implemented for %q", mapping.GroupVersionKind.Kind)
		},
		Decoder: func(toInternal bool) runtime.Decoder {
			if toInternal {
				return api.Codecs.UniversalDecoder()
			}
			return api.Codecs.UniversalDeserializer()
		},
		JSONEncoder: func() runtime.Encoder {
			return api.Codecs.LegacyCodec(registered.EnabledVersions()...)
		},
		Printer: func(mapping *meta.RESTMapping, noHeaders, withNamespace bool, wide bool, showAll bool, absoluteTimestamps bool, columnLabels []string) (kubectl.ResourcePrinter, error) {
			return kubectl.NewHumanReadablePrinter(noHeaders, withNamespace, wide, showAll, absoluteTimestamps, columnLabels), nil
		},
		PodSelectorForObject: func(object runtime.Object) (string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *api.Pod:
				if len(t.Labels) == 0 {
					return "", fmt.Errorf("the pod has no labels and cannot be exposed")
				}
				return kubectl.MakeLabels(t.Labels), nil
			case *api.Service:
				if t.Spec.Selector == nil {
					return "", fmt.Errorf("the service has no pod selector set")
				}
				return kubectl.MakeLabels(t.Spec.Selector), nil
			case *extensions.Deployment:
				return kubectl.MakeLabels(t.Spec.Selector), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return "", err
				}
				return "", fmt.Errorf("cannot extract pod selector from %v", gvk)
			}
		},
		PortsForObject: func(object runtime.Object) ([]string, error) {
			// TODO: replace with a swagger schema based approach (identify pod selector via schema introspection)
			switch t := object.(type) {
			case *api.ReplicationController:
				return getPorts(t.Spec.Template.Spec), nil
			case *api.Pod:
				return getPorts(t.Spec), nil
			case *api.Service:
				return getServicePorts(t.Spec), nil
			case *extensions.Deployment:
				return getPorts(t.Spec.Template.Spec), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot extract ports from %v", gvk)
			}
		},
		LabelsForObject: func(object runtime.Object) (map[string]string, error) {
			return meta.NewAccessor().Labels(object)
		},
		LogsForObject: func(object, options runtime.Object) (*client.Request, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.Pod:
				opts, ok := options.(*api.PodLogOptions)
				if !ok {
					return nil, errors.New("provided options object is not a PodLogOptions")
				}
				return c.Pods(t.Namespace).GetLogs(t.Name, opts), nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot get the logs from %v", gvk)
			}
		},
		PauseObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}
			switch t := object.(type) {
			case *extensions.Deployment:
				if t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = true
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot pause %v", gvk)
			}
		},
		ResumeObject: func(object runtime.Object) (bool, error) {
			c, err := clients.ClientForVersion(nil)
			if err != nil {
				return false, err
			}
			switch t := object.(type) {
			case *extensions.Deployment:
				if !t.Spec.Paused {
					return true, nil
				}
				t.Spec.Paused = false
				_, err := c.Extensions().Deployments(t.Namespace).Update(t)
				return false, err
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return false, err
				}
				return false, fmt.Errorf("cannot resume %v", gvk)
			}
		},
		Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			clientset := clientset.FromUnversionedClient(client)
			return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
		},
		Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
			mappingVersion := mapping.GroupVersionKind.GroupVersion()
			client, err := clients.ClientForVersion(&mappingVersion)
			if err != nil {
				return nil, err
			}
			return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
		},
		Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
			if validate {
				client, err := clients.ClientForVersion(nil)
				if err != nil {
					return nil, err
				}
				dir := cacheDir
				if len(dir) > 0 {
					version, err := client.ServerVersion()
					if err != nil {
						return nil, err
					}
					dir = path.Join(cacheDir, version.String())
				}
				return &clientSwaggerSchema{
					c:        client,
					cacheDir: dir,
					mapper:   api.RESTMapper,
				}, nil
			}
			return validation.NullSchema{}, nil
		},
		SwaggerSchema: func(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) {
			client, err := clients.ClientForVersion(&version)
			if err != nil {
				return nil, err
			}
			return client.Discovery().SwaggerSchema(version)
		},
		DefaultNamespace: func() (string, bool, error) {
			return clientConfig.Namespace()
		},
		Generators: func(cmdName string) map[string]kubectl.Generator {
			return DefaultGenerators(cmdName)
		},
		CanBeExposed: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot expose a %s", kind)
			}
			return nil
		},
		CanBeAutoscaled: func(kind unversioned.GroupKind) error {
			switch kind {
			case api.Kind("ReplicationController"), extensions.Kind("Deployment"):
				// nothing to do here
			default:
				return fmt.Errorf("cannot autoscale a %v", kind)
			}
			return nil
		},
		AttachablePodForObject: func(object runtime.Object) (*api.Pod, error) {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
			switch t := object.(type) {
			case *api.ReplicationController:
				return GetFirstPod(client, t.Namespace, t.Spec.Selector)
			case *extensions.Deployment:
				return GetFirstPod(client, t.Namespace, t.Spec.Selector)
			case *extensions.Job:
				return GetFirstPod(client, t.Namespace, t.Spec.Selector.MatchLabels)
			case *api.Pod:
				return t, nil
			default:
				gvk, err := api.Scheme.ObjectKind(object)
				if err != nil {
					return nil, err
				}
				return nil, fmt.Errorf("cannot attach to %v: not implemented", gvk)
			}
		},
		EditorEnvs: func() []string {
			return []string{"KUBE_EDITOR", "EDITOR"}
		},
	}
}
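// Hypothetical usage of the factory: a cobra command that pulls a typed
// client and the default namespace out of the Factory. NewCmdExample is
// illustrative only, not a real kubectl command.
func NewCmdExample(f *Factory, out io.Writer) *cobra.Command {
	return &cobra.Command{
		Use:   "example",
		Short: "List pods in the current namespace using the Factory's client",
		Run: func(cmd *cobra.Command, args []string) {
			client, err := f.Client()
			CheckErr(err)
			ns, _, err := f.DefaultNamespace()
			CheckErr(err)
			pods, err := client.Pods(ns).List(api.ListOptions{})
			CheckErr(err)
			fmt.Fprintf(out, "found %d pods in namespace %q\n", len(pods.Items), ns)
		},
	}
}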