// Check that the pods comprising a service get spread evenly across available zones func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) { // First create the service serviceName := "test-service" serviceSpec := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: f.Namespace.Name, }, Spec: v1.ServiceSpec{ Selector: map[string]string{ "service": serviceName, }, Ports: []v1.ServicePort{{ Port: 80, TargetPort: intstr.FromInt(80), }}, }, } _, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(serviceSpec) Expect(err).NotTo(HaveOccurred()) // Now create some pods behind the service podSpec := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Labels: map[string]string{"service": serviceName}, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "test", Image: framework.GetPauseImageName(f.ClientSet), }, }, }, } // Caution: StartPods requires at least one pod to replicate. // Based on the callers, replicas is always positive number: zoneCount >= 0 implies (2*zoneCount)+1 > 0. // Thus, no need to test for it. Once the precondition changes to zero number of replicas, // test for replicaCount > 0. Otherwise, StartPods panics. framework.ExpectNoError(testutils.StartPods(f.ClientSet, replicaCount, f.Namespace.Name, serviceName, *podSpec, false, framework.Logf)) // Wait for all of them to be scheduled selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName})) pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector) Expect(err).NotTo(HaveOccurred()) // Now make sure they're spread across zones zoneNames, err := getZoneNames(f.ClientSet) Expect(err).NotTo(HaveOccurred()) Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true)) }
		// Accumulate the per-node pod capacity into the cluster-wide total.
		totalPodCapacity += podCapacity.Value()
	}

	// Wait for the cluster to settle, then compute how many more pods are
	// needed to hit the cluster-wide max-pods capacity exactly.
	currentlyScheduledPods := framework.WaitForStableCluster(c, masterNodes)
	podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods

	By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

	// As the pods are distributed randomly among nodes,
	// it can easily happen that all nodes are saturated
	// and there is no need to create additional pods.
	// StartPods requires at least one pod to replicate (it panics on zero),
	// hence the guard below.
	if podsNeededForSaturation > 0 {
		framework.ExpectNoError(testutils.StartPods(c, podsNeededForSaturation, ns, "maxp", *initPausePod(f, pausePodConfig{
			Name:   "",
			Labels: map[string]string{"name": ""},
		}), true, framework.Logf))
	}
	// With the cluster saturated, one more pod should fail to schedule.
	podName := "additional-pod"
	createPausePod(f, pausePodConfig{
		Name:   podName,
		Labels: map[string]string{"name": "additional"},
	})
	waitForScheduler()
	// Expect podsNeededForSaturation pods scheduled and exactly 1 not scheduled.
	verifyResult(c, podsNeededForSaturation, 1, ns)
})

// This test verifies we don't allow scheduling of pods in a way that sum of limits of pods is greater than machines capacity.
// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.