// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service
	serviceName := "test-service"
	serviceSpec := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: f.Namespace.Name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{
				"service": serviceName,
			},
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	_, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
	Expect(err).NotTo(HaveOccurred())

	// Now create some pods behind the service
	podSpec := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   serviceName,
			Labels: map[string]string{"service": serviceName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: framework.GetPauseImageName(f.Client),
				},
			},
		},
	}

	// Caution: StartPods requires at least one pod to replicate.
	// Based on the callers, replicaCount is always positive: zoneCount >= 0 implies (2*zoneCount)+1 > 0.
	// Thus, there is no need to test for it here. Should the precondition ever allow zero replicas,
	// add a check for replicaCount > 0; otherwise StartPods panics.
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)

	// Wait for all of them to be scheduled
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
	pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
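// Illustrative only: should the precondition described above ever change so that
// replicaCount can be zero, the StartPods call would need a guard like the one the
// saturation test further below already uses, because framework.StartPods panics when
// asked to replicate zero pods. A minimal sketch, reusing the variables from the
// function above:
if replicaCount > 0 {
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)
}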
// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service
	serviceName := "test-service"
	serviceSpec := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      serviceName,
			Namespace: f.Namespace.Name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{
				"service": serviceName,
			},
			Ports: []api.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(80),
			}},
		},
	}
	_, err := f.Client.Services(f.Namespace.Name).Create(serviceSpec)
	Expect(err).NotTo(HaveOccurred())

	// Now create some pods behind the service
	podSpec := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   serviceName,
			Labels: map[string]string{"service": serviceName},
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "test",
					Image: "gcr.io/google_containers/pause-amd64:3.0",
				},
			},
		},
	}
	framework.StartPods(f.Client, replicaCount, f.Namespace.Name, serviceName, *podSpec, false)

	// Wait for all of them to be scheduled
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"service": serviceName}))
	pods, err := framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
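// Illustrative only: a sketch of how a caller might invoke SpreadServiceOrFail. The
// Ginkgo It wrapper and the zoneCount variable are assumptions here, not taken from the
// code above; the point is that (2*zoneCount)+1 is positive for any zoneCount >= 0, so
// the StartPods precondition noted in the first version holds.
It("should spread the pods of a service across available zones", func() {
	SpreadServiceOrFail(f, (2*zoneCount)+1, framework.GetPauseImageName(f.Client))
})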
	By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

	// As the pods are distributed randomly among nodes,
	// it can easily happen that all nodes are saturated
	// and there is no need to create additional pods.
	// StartPods requires at least one pod to replicate.
	if podsNeededForSaturation > 0 {
		framework.StartPods(c, podsNeededForSaturation, ns, "maxp", api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name:   "",
				Labels: map[string]string{"name": ""},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  "",
						Image: framework.GetPauseImageName(f.Client),
					},
				},
			},
		}, true)
	}
	podName := "additional-pod"
	_, err := c.Pods(ns).Create(&api.Pod{
		TypeMeta: unversioned.TypeMeta{
			Kind: "Pod",
		},
		ObjectMeta: api.ObjectMeta{
		totalPodCapacity += podCapacity.Value()
	}

	currentlyScheduledPods := framework.WaitForStableCluster(c, masterNodes)
	podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods

	By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))

	// As the pods are distributed randomly among nodes,
	// it can easily happen that all nodes are saturated
	// and there is no need to create additional pods.
	// StartPods requires at least one pod to replicate.
	if podsNeededForSaturation > 0 {
		framework.StartPods(c, podsNeededForSaturation, ns, "maxp", *initPausePod(f, pausePodConfig{
			Name:   "",
			Labels: map[string]string{"name": ""},
		}), true)
	}
	podName := "additional-pod"
	createPausePod(f, pausePodConfig{
		Name:   podName,
		Labels: map[string]string{"name": "additional"},
	})
	waitForScheduler()
	verifyResult(c, podsNeededForSaturation, 1, ns)
})

// This test verifies that we don't allow scheduling pods in a way that the sum of the pods' limits is greater than the machines' capacity.
// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods,
// because we need precise control over what's running in the cluster.
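// Illustrative only: the refactored saturation test above depends on a
// pausePodConfig/initPausePod/createPausePod helper set that is not shown in this
// excerpt. The sketch below is an assumption about what such helpers might look like,
// not their actual definitions.
type pausePodConfig struct {
	Name   string
	Labels map[string]string
}

func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
	// Build (but do not create) a pause pod carrying the requested name and labels.
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name:   conf.Name,
			Labels: conf.Labels,
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "pause",
					Image: framework.GetPauseImageName(f.Client),
				},
			},
		},
	}
}

func createPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
	// Create the pause pod in the test namespace and fail the test on error.
	pod, err := f.Client.Pods(f.Namespace.Name).Create(initPausePod(f, conf))
	Expect(err).NotTo(HaveOccurred())
	return pod
}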