Example #1
func runReplicationControllerTest(c *client.Client) {
	data, err := ioutil.ReadFile("api/examples/controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	if _, err := c.ReplicationControllers(api.NamespaceDefault).Create(&controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, &controller)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// wait for minions to indicate they have info about the desired pods
	pods, err := c.Pods(api.NamespaceDefault).List(labels.Set(controller.Spec.Selector).AsSelector())
	if err != nil {
		glog.Fatalf("FAILED: unable to get pods to list: %v", err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podsOnMinions(c, *pods)); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
Example #2
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	expectedPods := []string{}
	for _, rc := range rcList.Items {
		if _, ok := expectedRcs[rc.Name]; ok {
			if rc.Status.Replicas != 1 {
				return nil, fmt.Errorf("expected to find only one replica for rc %q, found %d", rc.Name, rc.Status.Replicas)
			}
			expectedRcs[rc.Name] = true
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	for rc, found := range expectedRcs {
		if !found {
			return nil, fmt.Errorf("Replication Controller %q not found.", rc)
		}
	}
	return expectedPods, nil
}
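A minimal usage sketch for the helper above, assuming c is the *client.Client from the example and the package-level expectedRcs map (RC name to a "seen" flag, as the loop implies) has already been populated:

	if pods, err := verifyExpectedRcsExistAndGetExpectedPods(c); err != nil {
		glog.Fatalf("RC verification failed: %v", err)
	} else {
		glog.Infof("Expecting %d pod UIDs: %v", len(pods), pods)
	}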
Example #3
// DescriberFor returns the default describe functions for each of the standard
// Kubernetes types.
func DescriberFor(kind string, c *client.Client) (Describer, bool) {
	switch kind {
	case "Pod":
		return &PodDescriber{
			PodClient: func(namespace string) (client.PodInterface, error) {
				return c.Pods(namespace), nil
			},
			ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) {
				return c.ReplicationControllers(namespace), nil
			},
		}, true
	case "ReplicationController":
		return &ReplicationControllerDescriber{
			PodClient: func(namespace string) (client.PodInterface, error) {
				return c.Pods(namespace), nil
			},
			ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) {
				return c.ReplicationControllers(namespace), nil
			},
		}, true
	case "Service":
		return &ServiceDescriber{
			ServiceClient: func(namespace string) (client.ServiceInterface, error) {
				return c.Services(namespace), nil
			},
		}, true
	}
	return nil, false
}
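A hedged lookup sketch; the Describer interface's own methods are not shown in this example, so the sketch only resolves a describer for a kind and checks the returned boolean:

	describer, ok := DescriberFor("Pod", c)
	if !ok {
		glog.Fatalf("no describer registered for kind %q", "Pod")
	}
	_ = describer // hand off to whatever code renders the description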
Example #4
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
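A usage sketch; the host name here is hypothetical, and restClient is assumed to be a configured *client.Client as in the example:

	if err := StartPods(10, "127.0.0.1", restClient); err != nil {
		glog.Fatalf("StartPods failed: %v", err)
	}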
Example #5
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically have
	// a version suffix e.g. heapster-monitoring-v1 and this will change after a rolling
	// update e.g. to heapster-monitoring-v2. By using a label query we can check for the
	// situation where both a heapster-monitoring-v1 and a heapster-monitoring-v2 replication
	// controller are running (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d",
				rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
Example #6
func runReplicationControllerTest(c *client.Client) {
	clientAPIVersion := c.APIVersion()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// Poll until we can retrieve the status of all pods matching the given label selector from their minions.
	// This involves 3 operations:
	//	- The scheduler must assign all pods to a minion
	//	- The assignment must be reflected in a `List` operation against the apiserver, for labels matching the selector
	//	- We need to be able to query the kubelet on that minion for information about the pod
	if err := wait.Poll(
		time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
Example #7
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// load the old RC into the "new" RC
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("Pod has no containers! (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
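A sketch of a typical call; passing an empty newName lets the function derive "<oldName>-<hash>" itself, per the body above. The RC name, image, and deployment key here are hypothetical:

	newRc, err := CreateNewControllerFromCurrentController(
		c, api.NamespaceDefault, "frontend", "", "example/image:v2", "deployment")
	if err != nil {
		glog.Fatalf("could not prepare next RC: %v", err)
	}
	glog.Infof("next RC will be named %s", newRc.Name)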
Example #8
func resizeRC(c *client.Client, ns, name string, replicas int) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return err
	}
	rc.Spec.Replicas = replicas
	_, err = c.ReplicationControllers(rc.Namespace).Update(rc)
	return err
}
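A short usage sketch (namespace and name hypothetical):

	if err := resizeRC(c, api.NamespaceDefault, "frontend", 5); err != nil {
		glog.Fatalf("resize failed: %v", err)
	}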
Example #9
func LoadExistingNextReplicationController(c *client.Client, namespace, newName string) (*api.ReplicationController, error) {
	if len(newName) == 0 {
		return nil, nil
	}
	newRc, err := c.ReplicationControllers(namespace).Get(newName)
	if err != nil && errors.IsNotFound(err) {
		return nil, nil
	}
	return newRc, err
}
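Note the double nil return: callers must treat a nil controller with a nil error as "nothing to resume", as in this sketch (names hypothetical):

	nextRc, err := LoadExistingNextReplicationController(c, api.NamespaceDefault, "frontend-v2")
	if err != nil {
		glog.Fatalf("lookup failed: %v", err)
	}
	if nextRc == nil {
		glog.Infof("no next RC exists yet; one would have to be created")
	}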
Example #10
// StartRC creates the given rc if it doesn't already exist, then updates it via kubectl's scaler.
func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
	created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
	if err != nil {
		glog.Infof("Rc %v doesn't exist, creating", controller.Name)
		created, err = restClient.ReplicationControllers(controller.Namespace).Create(controller)
		if err != nil {
			return nil, err
		}
	}
	// If we just created an rc, wait till it creates its replicas.
	return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
}
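A usage sketch, assuming controller is an *api.ReplicationController loaded from a manifest (as with RCFromManifest in the StartPods example):

	rc, err := StartRC(controller, restClient)
	if err != nil {
		glog.Fatalf("StartRC failed: %v", err)
	}
	glog.Infof("rc %s has %d replicas", rc.Name, rc.Status.Replicas)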
Example #11
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(restClient))
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{50 * time.Millisecond, DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{50 * time.Millisecond, DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := restClient.ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
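A sketch that scales an existing controller down to one replica (names hypothetical):

	scaled, err := ScaleRC("frontend", api.NamespaceDefault, 1, restClient)
	if err != nil {
		glog.Fatalf("scale failed: %v", err)
	}
	glog.Infof("rc %s now has %d replicas", scaled.Name, scaled.Status.Replicas)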
Example #12
// DeleteRC deletes a Replication Controller and all the pods it spawned.
func DeleteRC(c *client.Client, ns, name string) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return fmt.Errorf("Failed to find replication controller %s in namespace %s: %v", name, ns, err)
	}

	rc.Spec.Replicas = 0

	if _, err := c.ReplicationControllers(ns).Update(rc); err != nil {
		return fmt.Errorf("Failed to resize replication controller %s to zero: %v", name, err)
	}

	// Wait up to 20 minutes until all replicas are killed.
	endTime := time.Now().Add(time.Minute * 20)
	for {
		if time.Now().After(endTime) {
			return fmt.Errorf("Timeout while waiting for replication controller %s replicas to 0", name)
		}
		remainingTime := endTime.Sub(time.Now())
		err := wait.Poll(time.Second, remainingTime, client.ControllerHasDesiredReplicas(c, rc))
		if err != nil {
			Logf("Error while waiting for replication controller %s replicas to read 0: %v", name, err)
		} else {
			break
		}
	}

	// Delete the replication controller.
	if err := c.ReplicationControllers(ns).Delete(name); err != nil {
		return fmt.Errorf("Failed to delete replication controller %s: %v", name, err)
	}
	return nil
}
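A usage sketch; DeleteRC blocks until the replicas are gone, so it typically runs as the last step of a test's cleanup (namespace and name hypothetical):

	if err := DeleteRC(c, "e2e-tests", "frontend"); err != nil {
		Logf("cleanup failed: %v", err)
	}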
Example #13
func runReplicationControllerTest(c *client.Client) {
	clientAPIVersion := c.APIVersion()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// In practice the controller doesn't need 60s to create a handful of pods, but network latencies on CI
	// systems have been observed to vary unpredictably, so give the controller enough time to create pods.
	// Our e2e scalability tests will catch controllers that are *actually* slow.
	if err := wait.Poll(time.Second, time.Second*60, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// Poll until we can retrieve the status of all pods matching the given label selector from their minions.
	// This involves 3 operations:
	//	- The scheduler must assign all pods to a minion
	//	- The assignment must be reflected in a `List` operation against the apiserver, for labels matching the selector
	//	- We need to be able to query the kubelet on that minion for information about the pod
	if err := wait.Poll(
		time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
Example #14
// DeleteRC deletes a Replication Controller and all the pods it spawned.
func DeleteRC(c *client.Client, ns, name string) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return fmt.Errorf("Failed to find replication controller %s in namespace %s: %v", name, ns, err)
	}

	rc.Spec.Replicas = 0

	if _, err := c.ReplicationControllers(ns).Update(rc); err != nil {
		return fmt.Errorf("Failed to resize replication controller %s to zero: %v", name, err)
	}

	if err := wait.Poll(time.Second, time.Minute*20, client.ControllerHasDesiredReplicas(c, rc)); err != nil {
		return fmt.Errorf("Error waiting for replication controller %s replicas to reach 0: %v", name, err)
	}

	// Delete the replication controller.
	if err := c.ReplicationControllers(ns).Delete(name); err != nil {
		return fmt.Errorf("Failed to delete replication controller %s: %v", name, err)
	}
	return nil
}
Example #15
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
	ns := api.NamespaceDefault
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := kubectl.ReaperFor("ReplicationController", c)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(ns, controller.Name); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label)
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label)
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running and has a hostIP")

	// Wait for the pods to enter the running state. The loop waits until the pods
	// are running, so any non-running pod causes this test to time out.
	for _, pod := range pods.Items {
		err = waitForPodRunning(c, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t = time.Now()
	for i, pod := range pods.Items {
		for {
			p, err := c.Pods(ns).Get(pod.Name)
			Expect(err).NotTo(HaveOccurred())
			if p.Status.HostIP != "" {
				Logf("Controller %s: Replica %d has hostIP: %s", name, i+1, p.Status.HostIP)
				break
			}
			if time.Since(t) >= hostIPTimeout {
				Failf("Controller %s: Gave up waiting for hostIP of replica %d after %v seconds",
					name, i, time.Since(t).Seconds())
			}
			Logf("Controller %s: Retrying to get the hostIP of replica %d", name, i+1)
			time.Sleep(5 * time.Second)
		}
	}

	// Re-fetch the pod information to update the host port information.
	pods, err = c.Pods(ns).List(label)
	Expect(err).NotTo(HaveOccurred())

	// Verify that something is listening.
	By("Trying to dial each unique pod")

	for i, pod := range pods.Items {
		body, err := c.Get().
			Prefix("proxy").
			Resource("pods").
			Name(string(pod.Name)).
			Do().
			Raw()
		if err != nil {
			Failf("Controller %s: Failed to GET from replica %d: %v", name, i+1, err)
		}
		// The body should be the pod name.
		if string(body) != pod.Name {
			Failf("Controller %s: Replica %d expected response %s but got %s", name, i+1, pod.Name, string(body))
		}
		Logf("Controller %s: Got expected result from replica %d: %s", name, i+1, string(body))
	}
}
Example #16
			podCapacity, found := node.Status.Capacity["pods"]
			Expect(found).To(Equal(true))
			totalPodCapacity += podCapacity.Value()
		}

		err = deleteTestingNS(c)
		expectNoError(err)

		nsForTesting, err := createTestingNS("maxp", c)
		expectNoError(err)
		ns = nsForTesting.Name
		uuid = string(util.NewUUID())
	})

	AfterEach(func() {
		rc, err := c.ReplicationControllers(ns).Get(RCName)
		if err == nil && rc.Spec.Replicas != 0 {
			By("Cleaning up the replication controller")
			err := DeleteRC(c, ns, RCName)
			expectNoError(err)
		}

		By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
		if err := c.Namespaces().Delete(ns); err != nil {
			Failf("Couldn't delete ns %s", err)
		}
	})

	// This test verifies that the max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
	// and cannot be run in parallel with any other test that touches Nodes or Pods. This is because, to check that
	// max-pods is working, we need to fully saturate the cluster and keep it in that state for a few seconds.
Example #17
		waitForServiceInAddonTest(c, namespace.Name, "addon-test-updated", true)
		waitForReplicationControllerInAddonTest(c, namespace.Name, "addon-test-v2", true)

		waitForServiceInAddonTest(c, namespace.Name, "addon-test", false)
		waitForReplicationControllerInAddonTest(c, defaultNsName, "addon-test-v1", false)

		By("remove manifests")
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))

		waitForServiceInAddonTest(c, namespace.Name, "addon-test-updated", false)
		waitForReplicationControllerInAddonTest(c, namespace.Name, "addon-test-v2", false)

		By("verify invalid API addons weren't created")
		_, err = c.ReplicationControllers(namespace.Name).Get("invalid-addon-test-v1")
		Expect(err).To(HaveOccurred())
		_, err = c.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1")
		Expect(err).To(HaveOccurred())
		_, err = c.Services(namespace.Name).Get("ivalid-addon-test")
		Expect(err).To(HaveOccurred())
		_, err = c.Services(defaultNsName).Get("ivalid-addon-test")
		Expect(err).To(HaveOccurred())

		// invalid addons will be deleted by the deferred function
	})
})

func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
	expectNoError(waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
Example #18
// newRCByName creates a replication controller whose selector matches pods by the given name.
func newRCByName(c *client.Client, ns, name string, replicas int) (*api.ReplicationController, error) {
	By(fmt.Sprintf("creating replication controller %s", name))
	return c.ReplicationControllers(ns).Create(rcByNamePort(
		name, replicas, serveHostnameImage, 9376, map[string]string{}))
}
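A usage sketch, assuming c and ns are in scope as in the helper; the helper hard-wires serveHostnameImage and port 9376, so callers choose only the name and replica count:

	rc, err := newRCByName(c, ns, "serve-hostname", 3)
	expectNoError(err)
	Logf("created rc %s with %d replicas", rc.Name, rc.Spec.Replicas)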
Example #19
		By("Creating a replication controller")
		rc, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
			ObjectMeta: api.ObjectMeta{
				Name: name,
				Labels: map[string]string{
					"name": name,
				},
			},
			Spec: api.ReplicationControllerSpec{
				Replicas: 8,
				Selector: map[string]string{
					"name": name,
				},
				Template: &api.PodTemplateSpec{
					ObjectMeta: api.ObjectMeta{
						Labels: map[string]string{"name": name},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:    "webserver",
								Image:   "kubernetes/nettest:latest",
								Command: []string{"-service=" + name},
								Ports:   []api.ContainerPort{{ContainerPort: 8080}},
							},
						},
					},
				},
			},
		})
		if err != nil {
Example #20
func TestNetwork(c *client.Client) bool {
	ns := api.NamespaceDefault
	svc, err := c.Services(ns).Create(loadObjectOrDie(assetPath(
		"contrib", "for-tests", "network-tester", "service.json",
	)).(*api.Service))
	if err != nil {
		glog.Errorf("unable to create test service: %v", err)
		return false
	}
	// Clean up service
	defer func() {
		if err = c.Services(ns).Delete(svc.Name); err != nil {
			glog.Errorf("unable to delete svc %v: %v", svc.Name, err)
		}
	}()

	rc, err := c.ReplicationControllers(ns).Create(loadObjectOrDie(assetPath(
		"contrib", "for-tests", "network-tester", "rc.json",
	)).(*api.ReplicationController))
	if err != nil {
		glog.Errorf("unable to create test rc: %v", err)
		return false
	}
	// Clean up rc
	defer func() {
		rc.Spec.Replicas = 0
		rc, err = c.ReplicationControllers(ns).Update(rc)
		if err != nil {
			glog.Errorf("unable to modify replica count for rc %v: %v", rc.Name, err)
			return
		}
		if err = c.ReplicationControllers(ns).Delete(rc.Name); err != nil {
			glog.Errorf("unable to delete rc %v: %v", rc.Name, err)
		}
	}()

	const maxAttempts = 60
	for i := 0; i < maxAttempts; i++ {
		time.Sleep(time.Second)
		body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("status").Do().Raw()
		if err != nil {
			glog.Infof("Attempt %v/%v: service/pod still starting. (error: '%v')", i, maxAttempts, err)
			continue
		}
		switch string(body) {
		case "pass":
			glog.Infof("Passed on attempt %v. Cleaning up.", i)
			return true
		case "running":
			glog.Infof("Attempt %v/%v: test still running", i, maxAttempts)
		case "fail":
			if body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil {
				glog.Infof("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err)
			} else {
				glog.Infof("Failed on attempt %v. Cleaning up. Details:\n%v", i, string(body))
			}
			return false
		}
	}

	if body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil {
		glog.Infof("Timed out. Cleaning up. Error reading details: %v", err)
	} else {
		glog.Infof("Timed out. Cleaning up. Details:\n%v", string(body))
	}

	return false
}
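TestNetwork reports success as a bool rather than an error, so a caller gates on the return value, e.g.:

	if !TestNetwork(c) {
		glog.Fatalf("network test failed")
	}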
Example #21
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(c *client.Client, test string, image string) {
	ns := api.NamespaceDefault
	name := "my-hostname-" + test + "-" + string(util.NewUUID())
	replicas := 2

	// Create a replication controller for a service
	// that serves its hostname.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	By(fmt.Sprintf("Creating replication controller %s", name))
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 9376}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())
	// Cleanup the replication controller when we are done.
	defer func() {
		// Resize the replication controller to zero to get rid of pods.
		By("Cleaning up the replication controller")
		rcReaper, err := kubectl.ReaperFor("ReplicationController", c)
		if err != nil {
			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
		}
		if _, err = rcReaper.Stop(ns, controller.Name, 0, nil); err != nil {
			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
		}
	}()

	// List the pods, making sure we observe all the replicas.
	listTimeout := time.Minute
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label, fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	t := time.Now()
	for {
		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
		if len(pods.Items) == replicas {
			break
		}
		if time.Since(t) > listTimeout {
			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
				name, replicas, len(pods.Items), time.Since(t).Seconds())
		}
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
	}

	By("Ensuring each pod is running")

	// Wait for the pods to enter the running state. The loop waits until the pods
	// are running, so any non-running pod causes this test to time out.
	for _, pod := range pods.Items {
		err = waitForPodRunning(c, pod.Name)
		Expect(err).NotTo(HaveOccurred())
	}

	// Verify that something is listening.
	By("Trying to dial each unique pod")
	retryTimeout := 2 * time.Minute
	retryInterval := 5 * time.Second
	err = wait.Poll(retryInterval, retryTimeout, podResponseChecker{c, ns, label, name, true, pods}.checkAllResponses)
	if err != nil {
		Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
	}
}
Example #22
		waitForServiceInAddonTest(c, "addon-test-updated", true)
		waitForReplicationControllerInAddonTest(c, "addon-test-v2", true)

		waitForServiceInAddonTest(c, "addon-test", false)
		waitForReplicationControllerInAddonTest(c, "addon-test-v1", false)

		By("remove manifests")
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))

		waitForServiceInAddonTest(c, "addon-test-updated", false)
		waitForReplicationControllerInAddonTest(c, "invalid-addon-test-v1", false)

		By("verify invalid API addons weren't created")
		_, err = c.ReplicationControllers(addonNamespace).Get("invalid-addon-test-v1")
		Expect(err).To(HaveOccurred())
		_, err = c.Services(addonNamespace).Get("ivalid-addon-test")
		Expect(err).To(HaveOccurred())

		// invalid addons will be deleted by the deferred function
	})
})

func waitForServiceInAddonTest(c *client.Client, name string, exist bool) {
	expectNoError(waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}

func waitForReplicationControllerInAddonTest(c *client.Client, name string, exist bool) {
	expectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
Example #23
// RunRC launches a Replication Controller and waits for all the pods it spawns
// to become running. The controller must be cleaned up externally
// to this method.
func RunRC(c *client.Client, name string, ns, image string, replicas int) error {
	var last int
	current := 0
	same := 0

	By(fmt.Sprintf("Creating replication controller %s", name))
	_, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 80}},
						},
					},
				},
			},
		},
	})
	if err != nil {
		return fmt.Errorf("Error creating replication controller: %v", err)
	}

	By(fmt.Sprintf("Making sure all %d replicas exist", replicas))
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := listPods(c, ns, label, fields.Everything())
	if err != nil {
		return fmt.Errorf("Error listing pods: %v", err)
	}
	current = len(pods.Items)
	failCount := 5
	for same < failCount && current < replicas {
		Logf("Controller %s: Found %d pods out of %d", name, current, replicas)
		if last < current {
			same = 0
		} else if last == current {
			same++
		} else if current < last {
			return fmt.Errorf("Controller %s: Number of submitted pods dropped from %d to %d", name, last, current)
		}

		if same >= failCount {
			return fmt.Errorf("Controller %s: No pods submitted for the last %d checks", name, failCount)
		}

		last = current
		time.Sleep(5 * time.Second)
		pods, err = listPods(c, ns, label, fields.Everything())
		if err != nil {
			return fmt.Errorf("Error listing pods: %v", err)
		}
		current = len(pods.Items)
	}
	if current != replicas {
		return fmt.Errorf("Controller %s: Only found %d replicas out of %d", name, current, replicas)
	}
	Logf("Controller %s: Found %d pods out of %d", name, current, replicas)

	By("Waiting for each pod to be running")
	same = 0
	last = 0
	failCount = 10
	current = 0
	for same < failCount && current < replicas {
		current = 0
		waiting := 0
		pending := 0
		unknown := 0
		time.Sleep(10 * time.Second)

		currentPods, err := listPods(c, ns, label, fields.Everything())
		if err != nil {
			return fmt.Errorf("Error listing pods: %v", err)
		}
		if len(currentPods.Items) != len(pods.Items) {
			return fmt.Errorf("Number of reported pods changed: %d vs %d", len(currentPods.Items), len(pods.Items))
		}
		for _, p := range currentPods.Items {
			if p.Status.Phase == api.PodRunning {
				current++
			} else if p.Status.Phase == api.PodPending {
				if p.Spec.Host == "" {
					waiting++
				} else {
					pending++
				}
			} else if p.Status.Phase == api.PodUnknown {
				unknown++
			}
		}
		Logf("Pod States: %d running, %d pending, %d waiting, %d unknown ", current, pending, waiting, unknown)
		if last < current {
			same = 0
		} else if last == current {
			same++
		} else if current < last {
			return fmt.Errorf("Number of running pods dropped from %d to %d", last, current)
		}
		if same >= failCount {
			return fmt.Errorf("No pods started for the last %d checks", failCount)
		}
		last = current
	}
	if current != replicas {
		return fmt.Errorf("Only %d pods started out of %d", current, replicas)
	}
	return nil
}
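Since this variant deliberately leaves the controller behind, a caller would pair it with a cleanup helper such as DeleteRC (shown earlier); a sketch, assuming c, ns, and image are in scope:

	name := "load-test"
	if err := RunRC(c, name, ns, image, 50); err != nil {
		Failf("RunRC failed: %v", err)
	}
	defer DeleteRC(c, ns, name)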
Example #24
// RunRC launches a Replication Controller and waits for all the pods it spawns
// to become running.
func RunRC(c *client.Client, name string, ns, image string, replicas int) {
	defer GinkgoRecover()

	var last int
	current := 0
	same := 0

	defer func() {
		By("Cleaning up the replication controller")
		err := DeleteRC(c, ns, name)
		Expect(err).NotTo(HaveOccurred())
	}()

	By(fmt.Sprintf("Creating replication controller %s", name))
	_, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.ContainerPort{{ContainerPort: 80}},
						},
					},
				},
			},
		},
	})
	Expect(err).NotTo(HaveOccurred())

	By(fmt.Sprintf("Making sure all %d replicas exist", replicas))
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	pods, err := c.Pods(ns).List(label)
	Expect(err).NotTo(HaveOccurred())
	current = len(pods.Items)
	failCount := 5
	for same < failCount && current < replicas {
		Logf("Controller %s: Found %d pods out of %d", name, current, replicas)
		if last < current {
			same = 0
		} else if last == current {
			same++
		} else if current < last {
			Failf("Controller %s: Number of submitted pods dropped from %d to %d", last, current)
		}

		if same >= failCount {
			Logf("No pods submitted for the last %d checks", failCount)
		}

		last = current
		time.Sleep(5 * time.Second)
		pods, err = c.Pods(ns).List(label)
		Expect(err).NotTo(HaveOccurred())
		current = len(pods.Items)
	}
	Expect(current).To(Equal(replicas))
	Logf("Controller %s: Found %d pods out of %d", name, current, replicas)

	By("Waiting for each pod to be running")
	same = 0
	last = 0
	failCount = 6
	unknown := 0
	pending := 0
	current = 0
	for same < failCount && current < replicas {
		current = 0
		pending = 0
		unknown = 0
		time.Sleep(10 * time.Second)
		for _, pod := range pods.Items {
			p, err := c.Pods(ns).Get(pod.Name)
			Expect(err).NotTo(HaveOccurred())
			if p.Status.Phase == api.PodRunning {
				current++
			} else if p.Status.Phase == api.PodPending {
				pending++
			} else if p.Status.Phase == api.PodUnknown {
				unknown++
			}
		}
		Logf("Pod States: %d running, %d pending, %d unknown ", current, pending, unknown)
		if last < current {
			same = 0
		} else if last == current {
			same++
		} else if current < last {
			Failf("Number of running pods dropped from %d to %d", last, current)
		}
		if same >= failCount {
			Logf("No pods started for the last %d checks", failCount)
		}
		last = current
	}
	Expect(current).To(Equal(replicas))
}
Example #25
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func TestBasicImage(c *client.Client, image string) bool {
	ns := api.NamespaceDefault
	// TODO(satnam6502): Generate a unique name to allow
	//                   parallel executions of this test.
	name := "my-hostname"
	replicas := 2

	// Attempt to delete a controller that might have been
	// left lying around from a previously aborted test.
	controllers, err := c.ReplicationControllers(ns).List(labels.Everything())
	if err != nil {
		glog.Infof("Failed to list replication controllers: %v", err)
		return false
	}
	for _, cnt := range controllers.Items {
		if cnt.Name == name {
			glog.Infof("Found a straggler %s controller", name)
			// Delete any pods controlled by this replication controller.
			cnt.Spec.Replicas = 0
			if _, err := c.ReplicationControllers(ns).Update(&cnt); err != nil {
				glog.Warningf("Failed to resize straggler controller to zero: %v", err)
			}
			// Delete the controller
			if err = c.ReplicationControllers(ns).Delete(name); err != nil {
				glog.Warningf("Failed to delete straggler replication controller: %v", err)
			}
			break
		}
	}

	// Create a replication controller for a service
	// that serves its hostname on port 8080.
	// The source for the Docker container kubernetes/serve_hostname is
	// in contrib/for-demos/serve_hostname
	glog.Infof("Creating replication controller %s", name)
	controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ReplicationControllerSpec{
			Replicas: replicas,
			Selector: map[string]string{
				"name": name,
			},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{
					Labels: map[string]string{"name": name},
				},
				Spec: api.PodSpec{
					Containers: []api.Container{
						{
							Name:  name,
							Image: image,
							Ports: []api.Port{{ContainerPort: 9376, HostPort: 8080}},
						},
					},
				},
			},
		},
	})
	if err != nil {
		glog.Infof("Failed to create replication controller for %s: %v", name, err)
		return false
	}

	// List the pods.
	// pods, err := c.Pods(ns).List(labels.Set{"name": name}.AsSelector())
	pods, err := c.Pods(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{"name": name})))
	if err != nil {
		glog.Errorf("Failed to list pods before wait for running check: %v", err)
		return false
	}
	for i, pod := range pods.Items {
		glog.Infof("Replica %d: %s\n", i+1, pod.Name)
	}

	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	for _, pod := range pods.Items {
		waitForPodRunning(c, pod.Name)
	}

	// List the pods again to get the host IP information.
	pods, err = c.Pods(ns).List(labels.SelectorFromSet(labels.Set(map[string]string{"name": name})))
	if err != nil {
		glog.Errorf("Failed to list pods after wait for running check: %v", err)
		return false
	}

	// Verify that something is listening.
	for i, pod := range pods.Items {
		glog.Infof("Pod: %s hostIP: %s", pod.Name, pod.Status.HostIP)
		if pod.Status.HostIP == "" {
			glog.Warningf("Skipping test of GET response since hostIP is not available")
			continue
		}
		resp, err := http.Get(fmt.Sprintf("http://%s:8080", pod.Status.HostIP))
		if err != nil {
			glog.Errorf("Failed to GET from replica %d: %v", i+1, err)
			return false
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			glog.Errorf("Expected OK status code for replica %d but got %d", i+1, resp.StatusCode)
			return false
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			glog.Errorf("Failed to read the body of the GET response from replica %d: %v", i+1, err)
			return false
		}
		// The body should be the pod name although we may need to skip a newline
		// character at the end of the response.
		if !strings.HasPrefix(string(body), pod.Name) {
			glog.Errorf("From replica %d expected response %s but got %s", i+1, pod.Name, string(body))
			return false
		}
		glog.Infof("Got expected result from replica %d: %s", i+1, string(body))
	}

	// Resize the replication controller to zero to get rid of pods.
	controller.Spec.Replicas = 0
	if _, err = c.ReplicationControllers(ns).Update(controller); err != nil {
		glog.Errorf("Failed to resize replication controller to zero: %v", err)
		return false
	}

	// Delete the replication controller.
	if err = c.ReplicationControllers(ns).Delete(name); err != nil {
		glog.Errorf("Failed to delete replication controller %s: %v", name, err)
		return false
	}

	return true
}
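A closing usage sketch: TestBasicImage returns a bool, so a test driver would treat false as failure. The image name follows the kubernetes/serve_hostname container mentioned in the comments above:

	if !TestBasicImage(c, "kubernetes/serve_hostname") {
		glog.Fatalf("basic image test failed")
	}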