Example 1
// Check that the pods comprising a replication controller get spread evenly across available zones
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
	name := "ubelite-spread-rc-" + string(util.NewUUID())
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.Client.ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicaCount,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Clean up the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := framework.DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()
	// List the pods, making sure we observe all the replicas.
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := framework.PodsCreated(f.Client, f.Namespace.Name, name, replicaCount)
	Expect(err).NotTo(HaveOccurred())

	// Wait for all of them to be scheduled
	By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled.  Selector: %v", replicaCount, name, selector))
	pods, err = framework.WaitForPodsWithLabelScheduled(f.Client, f.Namespace.Name, selector)
	Expect(err).NotTo(HaveOccurred())

	// Now make sure they're spread across zones
	zoneNames, err := getZoneNames(f.Client)
	Expect(err).NotTo(HaveOccurred())
	Expect(checkZoneSpreading(f.Client, pods, zoneNames)).To(Equal(true))
}
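The helpers getZoneNames and checkZoneSpreading are not shown in this listing. As a rough, framework-free sketch of what the spread check amounts to (in the e2e suite each pod's zone is derived from its node's failure-domain label; here the zones are assumed to be resolved already), an even spread can be verified by requiring that per-zone pod counts differ by at most one:

// checkEvenSpread is a minimal sketch, not the suite's checkZoneSpreading:
// it counts pods per zone and reports whether the busiest and least-busy
// zones differ by at most one pod. podZones holds the resolved zone of each
// pod; zoneNames lists all zones that should be considered, including empty ones.
func checkEvenSpread(podZones []string, zoneNames []string) bool {
	podsPerZone := make(map[string]int)
	for _, zone := range zoneNames {
		podsPerZone[zone] = 0
	}
	for _, zone := range podZones {
		podsPerZone[zone]++
	}
	minCount, maxCount := int(^uint(0)>>1), 0
	for _, count := range podsPerZone {
		if count < minCount {
			minCount = count
		}
		if count > maxCount {
			maxCount = count
		}
	}
	return maxCount-minCount <= 1
}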
Example 2
			node, err := c.Core().Nodes().Get(nodeName)
			Expect(err).NotTo(HaveOccurred())

			// This creates a temporary network partition and verifies that
			// 'podNameToDisappear', which belongs to replication controller 'rcName',
			// does not disappear (its termination grace period is set to 30, so the
			// wait below is expected to time out).
			// It then checks that the replication controller still has 'replicas'
			// running pods for the duration of the partition.
			By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
			testUnderTemporaryNetworkFailure(c, ns, node, func() {
				framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
				err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
				Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")

				By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
				_, err = framework.PodsCreated(c, ns, name, replicas)
				Expect(err).NotTo(HaveOccurred())
			})

			framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
			if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
				framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
			}
		})
	})

	framework.KubeDescribe("[StatefulSet]", func() {
		psName := "pet"
		labels := map[string]string{
			"foo": "bar",
		}
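testUnderTemporaryNetworkFailure wraps the block-traffic/run-assertions/restore-traffic pattern that this test relies on. A minimal sketch of that shape, with hypothetical blockTraffic and unblockTraffic helpers standing in for the e2e suite's real plumbing (which rewrites iptables rules between the master and the node):

// underTemporaryNetworkFailure is a sketch of the pattern, not the suite's
// implementation. blockTraffic and unblockTraffic are hypothetical stand-ins.
func underTemporaryNetworkFailure(nodeName string, blockTraffic, unblockTraffic func(nodeName string), testFunc func()) {
	blockTraffic(nodeName)
	// Restore connectivity even if testFunc fails an assertion or panics, so a
	// failed case does not leave the node partitioned for the rest of the suite.
	defer unblockTraffic(nodeName)
	testFunc()
}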
Example 3
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *framework.Framework, test string, image string) {
	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
	replicas := int32(2)

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: func(i int32) *int32 { return &i }(replicas),
			Selector: map[string]string{
				"name": name,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  name,
							Image: image,
							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Clean up the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))

	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
	Expect(err).NotTo(HaveOccurred())

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. The wait loops until every
	// pod is running, so a pod that never starts causes this test to time out.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
	if err != nil {
		framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
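For reference, a Ginkgo call site for ServeImageOrFail might look like the following; the spec description and image tag are illustrative, not taken from this listing:

// Illustrative invocation from a Ginkgo spec.
It("should serve a basic image on each replica with a public image", func() {
	ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
})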
Example 4
func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
	targetNode := nodeList.Items[0]

	By("Creating RC which spawns configmap-volume pods")
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      "kubernetes.io/hostname",
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{targetNode.Name},
							},
						},
					},
				},
			},
		},
	}

	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name: rcName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &podCount,
			Selector: map[string]string{
				"name": rcName,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{"name": rcName},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:    "test-container",
							Image:   "gcr.io/google_containers/busybox:1.24",
							Command: []string{"sleep", "10000"},
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU: resource.MustParse("10m"),
								},
							},
							VolumeMounts: volumeMounts,
						},
					},
					Affinity:  affinity,
					DNSPolicy: v1.DNSDefault,
					Volumes:   volumes,
				},
			},
		},
	}
	_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rc)
	Expect(err).NotTo(HaveOccurred(), "error creating replication controller")

	defer func() {
		err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
		framework.ExpectNoError(err)
	}()

	pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
	Expect(err).NotTo(HaveOccurred(), "error waiting for pods to be created")

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. The wait loops until every
	// pod is running, so a pod that never starts causes this test to time out.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = f.WaitForPodRunning(pod.Name)
		Expect(err).NotTo(HaveOccurred(), "Failed waiting for pod %s to enter running state", pod.Name)
	}
}
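The caller is expected to supply matching volumes and volumeMounts slices. A sketch of wiring up a single ConfigMap-backed volume; the ConfigMap name, mount path, and pod count below are assumptions for illustration:

// Hypothetical call site: one ConfigMap-backed wrapped volume. The ConfigMap
// "wrapped-volume-configmap" is assumed to already exist in the namespace.
volumes := []v1.Volume{
	{
		Name: "configmap-volume",
		VolumeSource: v1.VolumeSource{
			ConfigMap: &v1.ConfigMapVolumeSource{
				LocalObjectReference: v1.LocalObjectReference{Name: "wrapped-volume-configmap"},
			},
		},
	},
}
volumeMounts := []v1.VolumeMount{
	{Name: "configmap-volume", MountPath: "/etc/configmap-volume"},
}
testNoWrappedVolumeRace(f, volumes, volumeMounts, 5)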