func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
	timeout := 5 * time.Minute
	replicas := millicores / 100

	ReserveCpu(f, id, 1, 100)
	framework.ExpectNoError(framework.ScaleRC(f.Client, f.Namespace.Name, id, uint(replicas), false))

	for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
		pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels)
		if err != nil {
			return err
		}

		if len(pods) != replicas {
			continue
		}

		allRunningOrUnschedulable := true
		for _, pod := range pods {
			if !podRunningOrUnschedulable(pod) {
				allRunningOrUnschedulable = false
				break
			}
		}
		if allRunningOrUnschedulable {
			return nil
		}
	}
	return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
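// The wait in reserveAllCpu above is a bare three-clause for loop used as a
// poll-until-timeout mechanism. Below is a minimal, dependency-free sketch of
// that same idiom; pollUntil and everything in main are illustrative names,
// not part of the e2e framework.
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil re-runs check until it returns true or timeout elapses, sleeping
// interval between attempts. The sleep sits in the for statement's post
// clause, so the first check runs immediately.
func pollUntil(timeout, interval time.Duration, check func() bool) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		if check() {
			return nil
		}
	}
	return errors.New("timed out waiting for condition")
}

func main() {
	ready := time.Now().Add(120 * time.Millisecond)
	// Stand-in for "all pods are running or unschedulable".
	err := pollUntil(time.Second, 20*time.Millisecond, func() bool {
		return time.Now().After(ready)
	})
	fmt.Println(err) // <nil>
}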
// Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
// Scaling always happens based on the original size, not the current size.
func scaleRC(wg *sync.WaitGroup, config *testutils.RCConfig, scalingTime time.Duration) {
	defer GinkgoRecover()
	defer wg.Done()

	sleepUpTo(scalingTime)
	newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
	framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
		fmt.Sprintf("scaling rc %s for the first time", config.Name))
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
	options := api.ListOptions{
		LabelSelector:   selector,
		ResourceVersion: "0",
	}
	_, err := config.Client.Core().Pods(config.Namespace).List(options)
	framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
}
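// The comment on scaleRC above claims the new size falls within
// [0.5*size, 1.5*size]; that follows from rand.Intn(Replicas) being uniform
// over [0, Replicas), shifted by Replicas/2. A standalone sanity check of the
// same expression (the replica count and iteration count are arbitrary):
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	replicas := 10
	lo, hi := replicas, 0
	for i := 0; i < 100000; i++ {
		// Same expression scaleRC uses for the new size.
		n := rand.Intn(replicas) + replicas/2
		if n < lo {
			lo = n
		}
		if n > hi {
			hi = n
		}
	}
	// With replicas == 10 the observed range is [5, 14], i.e. roughly
	// [0.5*size, 1.5*size) around the original size.
	fmt.Println(lo, hi)
}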
By("reserving all available cpu") err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores) defer framework.DeleteRCAndPods(f.Client, ns, "reserve-all-cpu") framework.ExpectNoError(err) By("creating a new instance of DNS and waiting for DNS to be scheduled") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) listOpts := api.ListOptions{LabelSelector: label} rcs, err := f.Client.ReplicationControllers(api.NamespaceSystem).List(listOpts) framework.ExpectNoError(err) Expect(len(rcs.Items)).Should(Equal(1)) rc := rcs.Items[0] replicas := uint(rc.Spec.Replicas) err = framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas+1, true) defer framework.ExpectNoError(framework.ScaleRC(f.Client, api.NamespaceSystem, rc.Name, replicas, true)) framework.ExpectNoError(err) }) }) func reserveAllCpu(f *framework.Framework, id string, millicores int) error { timeout := 5 * time.Minute replicas := millicores / 100 ReserveCpu(f, id, 1, 100) framework.ExpectNoError(framework.ScaleRC(f.Client, f.Namespace.Name, id, uint(replicas), false)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { pods, err := framework.GetPodsInNamespace(f.Client, f.Namespace.Name, framework.ImagePullerLabels) if err != nil {
}) It("Controller Manager should not create/delete replicas across restart", func() { // Requires master ssh access. framework.SkipUnlessProviderIs("gce", "aws") restarter := NewRestartConfig( framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout) restarter.restart() // The intent is to ensure the replication controller manager has observed and reported status of // the replication controller at least once since the manager restarted, so that we can determine // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // to the same size achieves this, because the scale operation advances the RC's sequence number // and awaits it to be observed and reported back in the RC's status. framework.ScaleRC(f.Client, f.ClientSet, ns, rcName, numPods, true) // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? existingKeys := sets.NewString() newKeys := sets.NewString() for _, k := range existingPods.ListKeys() { existingKeys.Insert(k) } for _, k := range newPods.ListKeys() { newKeys.Insert(k) } if len(newKeys.List()) != len(existingKeys.List()) || !newKeys.IsSuperset(existingKeys) { framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker) }
		framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
		label := labels.SelectorFromSet(labels.Set(map[string]string{"db": "rethinkdb"}))
		err := framework.WaitForPodsWithLabelRunning(c, ns, label)
		Expect(err).NotTo(HaveOccurred())
		checkDbInstances := func() {
			forEachPod("db", "rethinkdb", func(pod api.Pod) {
				_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
				Expect(err).NotTo(HaveOccurred())
			})
		}
		checkDbInstances()
		err = framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
		Expect(err).NotTo(HaveOccurred())

		By("scaling rethinkdb")
		framework.ScaleRC(c, ns, "rethinkdb-rc", 2, true)
		checkDbInstances()

		By("starting admin")
		framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
		framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
		err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
		Expect(err).NotTo(HaveOccurred())
		checkDbInstances()
		content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
		Expect(err).NotTo(HaveOccurred())
		if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
			framework.Failf("RethinkDB console is not running")
		}
	})
})
		framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
		label := labels.SelectorFromSet(labels.Set(map[string]string{"db": "rethinkdb"}))
		err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
		Expect(err).NotTo(HaveOccurred())
		checkDbInstances := func() {
			forEachPod("db", "rethinkdb", func(pod v1.Pod) {
				_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
				Expect(err).NotTo(HaveOccurred())
			})
		}
		checkDbInstances()
		err = framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
		Expect(err).NotTo(HaveOccurred())

		By("scaling rethinkdb")
		framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true)
		checkDbInstances()

		By("starting admin")
		framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
		framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
		err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
		Expect(err).NotTo(HaveOccurred())
		checkDbInstances()
		content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
		Expect(err).NotTo(HaveOccurred())
		if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
			framework.Failf("RethinkDB console is not running")
		}
	})
})
		framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
		label := labels.SelectorFromSet(labels.Set(map[string]string{"db": "rethinkdb"}))
		err := framework.WaitForPodsWithLabelRunning(c, ns, label)
		Expect(err).NotTo(HaveOccurred())
		checkDbInstances := func() {
			forEachPod("db", "rethinkdb", func(pod api.Pod) {
				_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
				Expect(err).NotTo(HaveOccurred())
			})
		}
		checkDbInstances()
		err = framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
		Expect(err).NotTo(HaveOccurred())

		By("scaling rethinkdb")
		framework.ScaleRC(c, f.ClientSet, ns, "rethinkdb-rc", 2, true)
		checkDbInstances()

		By("starting admin")
		framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
		framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
		err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
		Expect(err).NotTo(HaveOccurred())
		checkDbInstances()
		content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
		Expect(err).NotTo(HaveOccurred())
		if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
			framework.Failf("RethinkDB console is not running")
		}
	})
})
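// makeHttpRequestToService is a test helper that is not shown in these
// snippets. Below is a minimal stdlib-only stand-in that fetches a page and
// applies the same title check, assuming the console answers a plain GET;
// fetchBody and the address are illustrative, not the framework's helper.
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

// fetchBody performs a GET and returns the response body as a string.
func fetchBody(url string) (string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	return string(b), err
}

func main() {
	content, err := fetchBody("http://localhost:8080/") // placeholder address
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
		fmt.Println("RethinkDB console is not running")
		return
	}
	fmt.Println("console is up")
}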