func TestKubernetesROService(c *client.Client) bool { svc := api.ServiceList{} err := c.Get(). Namespace("default"). AbsPath("/api/v1beta1/proxy/services/kubernetes-ro/api/v1beta1/services"). Do(). Into(&svc) if err != nil { glog.Errorf("unexpected error listing services using ro service: %v", err) return false } var foundRW, foundRO bool for i := range svc.Items { if svc.Items[i].Name == "kubernetes" { foundRW = true } if svc.Items[i].Name == "kubernetes-ro" { foundRO = true } } if !foundRW { glog.Error("no RW service found") } if !foundRO { glog.Error("no RO service found") } if !foundRW || !foundRO { return false } return true }
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) { By("getting list of nodes") nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything()) expectNoError(err) var errors []error retries := maxRetries for { errors = []error{} for _, node := range nodeList.Items { // cadvisor is not accessible directly unless its port (4194 by default) is exposed. // Here, we access the '/stats/' REST endpoint on the kubelet, which polls cadvisor internally. statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource)) _, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw() if err != nil { errors = append(errors, err) } } if len(errors) == 0 { return } if retries--; retries <= 0 { break } Logf("failed to retrieve kubelet stats -\n %v", errors) time.Sleep(sleepDuration) } Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors) }
// Performs a get on a node proxy endpoint given the nodename and rest client. func nodeProxyRequest(c *client.Client, node, endpoint string) client.Result { return c.Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). Suffix(endpoint). Do() }
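// A minimal usage sketch built on nodeProxyRequest (illustrative only; it assumes
// the Logf helper used elsewhere in this suite and a kubelet reachable through the
// apiserver proxy): fetch the kubelet /healthz endpoint for a node and log it.
func logKubeletHealthz(c *client.Client, nodeName string) error {
	raw, err := nodeProxyRequest(c, nodeName, "healthz").Raw()
	if err != nil {
		return err
	}
	Logf("kubelet %s /healthz: %s", nodeName, string(raw))
	return nil
}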
// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector. func NewListWatchFromClient(client *client.Client, resource string, namespace string, fieldSelector labels.Selector) *ListWatch { listFunc := func() (runtime.Object, error) { return client.Get().Namespace(namespace).Resource(resource).SelectorParam("fields", fieldSelector).Do().Get() } watchFunc := func(resourceVersion string) (watch.Interface, error) { return client.Get().Prefix("watch").Namespace(namespace).Resource(resource).SelectorParam("fields", fieldSelector).Param("resourceVersion", resourceVersion).Watch() } return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc} }
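// A minimal sketch of using the returned ListWatch directly (the "pods" resource,
// the default namespace, and the "0" resource version are illustrative assumptions);
// a real caller would take the resourceVersion from the list result before watching.
func listThenWatchPods(kubeClient *client.Client) error {
	lw := NewListWatchFromClient(kubeClient, "pods", api.NamespaceDefault, labels.Everything())
	if _, err := lw.ListFunc(); err != nil {
		return err
	}
	w, err := lw.WatchFunc("0")
	if err != nil {
		return err
	}
	defer w.Stop()
	return nil
}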
func makeHttpRequestToService(c *client.Client, ns, service, path string) (string, error) { result, err := c.Get(). Prefix("proxy"). Namespace(ns). Resource("services"). Name(service). Suffix(path). Do(). Raw() return string(result), err }
func makeRequestToGuestbook(c *client.Client, cmd, value string) (string, error) { result, err := c.Get(). Prefix("proxy"). Resource("services"). Name("frontend"). Suffix("/index.php"). Param("cmd", cmd). Param("key", "messages"). Param("value", value). Do(). Raw() return string(result), err }
// Retrieve metrics from the kubelet server of the given node. func getKubeletMetricsThroughProxy(c *client.Client, node string) (string, error) { metric, err := c.Get(). Prefix("proxy"). Resource("nodes"). Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)). Suffix("metrics"). Do(). Raw() if err != nil { return "", err } return string(metric), nil }
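// A small sketch built on getKubeletMetricsThroughProxy (illustrative; it reuses the
// node listing and the expectNoError/Failf helpers already present in this suite):
// fetch kubelet metrics for every node and fail if any node returns an empty payload.
func checkKubeletMetricsOnAllNodes(c *client.Client) {
	nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	for _, node := range nodes.Items {
		metrics, err := getKubeletMetricsThroughProxy(c, node.Name)
		expectNoError(err, fmt.Sprintf("getting metrics for node %s", node.Name))
		if len(metrics) == 0 {
			Failf("node %s returned empty metrics", node.Name)
		}
	}
}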
// testContainerOutputInNamespace runs the given pod in the given namespace and waits // for the first container in the podSpec to move into the 'Success' status. It retrieves // the container log and searches for lines of expected output. func testContainerOutputInNamespace(ns, scenarioName string, c *client.Client, pod *api.Pod, expectedOutput []string) { By(fmt.Sprintf("Creating a pod to test %v", scenarioName)) defer c.Pods(ns).Delete(pod.Name) if _, err := c.Pods(ns).Create(pod); err != nil { Failf("Failed to create pod: %v", err) } containerName := pod.Spec.Containers[0].Name // Wait for client pod to complete. expectNoError(waitForPodSuccess(c, pod.Name, containerName)) // Grab its logs. Get host first. podStatus, err := c.Pods(ns).Get(pod.Name) if err != nil { Failf("Failed to get pod status: %v", err) } By(fmt.Sprintf("Trying to get logs from host %s pod %s container %s: %v", podStatus.Spec.Host, podStatus.Name, containerName, err)) var logs []byte start := time.Now() // Sometimes the actual containers take a second to get started, try to get logs for 60s for time.Now().Sub(start) < (60 * time.Second) { logs, err = c.Get(). Prefix("proxy"). Resource("nodes"). Name(podStatus.Spec.Host). Suffix("containerLogs", ns, podStatus.Name, containerName). Do(). Raw() fmt.Sprintf("pod logs:%v\n", string(logs)) By(fmt.Sprintf("pod logs:%v\n", string(logs))) if strings.Contains(string(logs), "Internal Error") { By(fmt.Sprintf("Failed to get logs from host %q pod %q container %q: %v", podStatus.Spec.Host, podStatus.Name, containerName, string(logs))) time.Sleep(5 * time.Second) continue } break } for _, m := range expectedOutput { Expect(string(logs)).To(ContainSubstring(m), "%q in container output", m) } }
func runSelfLinkTest(c *client.Client) { var svc api.Service err := c.Post().Resource("services").Body( &api.Service{ ObjectMeta: api.ObjectMeta{ Name: "selflinktest", Labels: map[string]string{ "name": "selflinktest", }, }, Spec: api.ServiceSpec{ Port: 12345, // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, }, }, ).Do().Into(&svc) if err != nil { glog.Fatalf("Failed creating selflinktest service: %v", err) } err = c.Get().AbsPath(svc.SelfLink).Do().Into(&svc) if err != nil { glog.Fatalf("Failed listing service with supplied self link '%v': %v", svc.SelfLink, err) } var svcList api.ServiceList err = c.Get().Resource("services").Do().Into(&svcList) if err != nil { glog.Fatalf("Failed listing services: %v", err) } err = c.Get().AbsPath(svcList.SelfLink).Do().Into(&svcList) if err != nil { glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err) } found := false for i := range svcList.Items { item := &svcList.Items[i] if item.Name != "selflinktest" { continue } found = true err = c.Get().AbsPath(item.SelfLink).Do().Into(&svc) if err != nil { glog.Fatalf("Failed listing service with supplied self link '%v': %v", item.SelfLink, err) } break } if !found { glog.Fatalf("never found selflinktest service") } glog.Infof("Self link test passed") // TODO: Should test PUT at some point, too. }
func getData(c *client.Client, podID string) (*updateDemoData, error) { body, err := c.Get(). Prefix("proxy"). Resource("pods"). Name(podID). Suffix("data.json"). Do(). Raw() if err != nil { return nil, err } Logf("got data: %s", body) var data updateDemoData err = json.Unmarshal(body, &data) return &data, err }
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) { var failed []string expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { failed = []string{} for _, fileName := range fileNames { if _, err := client.Get(). Prefix("proxy"). Resource("pods"). Namespace(pod.Namespace). Name(pod.Name). Suffix(fileDir, fileName). Do().Raw(); err != nil { Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err) failed = append(failed, fileName) } } if len(failed) == 0 { return true, nil } Logf("Lookups using %s failed for: %v\n", pod.Name, failed) return false, nil })) Expect(len(failed)).To(Equal(0)) }
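// An illustrative call to the helper above (the file names and directory are
// hypothetical): wait until both probe results can be read from the test pod
// through the apiserver proxy.
// assertFilesExist([]string{"kubernetes-ro", "google.com"}, "results", pod, c)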
func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) { var result []byte var err error for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { result, err = c.Get(). Prefix("proxy"). Namespace(ns). Resource("services"). Name(service). Suffix(path). Do(). Raw() if err == nil { break } } return string(result), err }
func GetServerVersion(client *client.Client) (*version.Info, error) { body, err := client.Get().AbsPath("/version").Do().Raw() if err != nil { return nil, err } var info version.Info err = json.Unmarshal(body, &info) if err != nil { return nil, fmt.Errorf("Got '%s': %v", string(body), err) } return &info, nil }
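// A brief usage sketch for GetServerVersion (a minimal sketch; Major, Minor and
// GitVersion are fields of version.Info): log what the apiserver reports.
func logServerVersion(c *client.Client) {
	info, err := GetServerVersion(c)
	if err != nil {
		glog.Errorf("could not get server version: %v", err)
		return
	}
	glog.Infof("server version %s.%s (%s)", info.Major, info.Minor, info.GitVersion)
}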
func runSelfLinkTestOnNamespace(c *client.Client, namespace string) { svcBody := api.Service{ ObjectMeta: api.ObjectMeta{ Name: "selflinktest", Namespace: namespace, Labels: map[string]string{ "name": "selflinktest", }, }, Spec: api.ServiceSpec{ // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, Ports: []api.ServicePort{{ Port: 12345, Protocol: "TCP", }}, SessionAffinity: "None", }, } services := c.Services(namespace) svc, err := services.Create(&svcBody) if err != nil { glog.Fatalf("Failed creating selflinktest service: %v", err) } err = c.Get().RequestURI(svc.SelfLink).Do().Into(svc) if err != nil { glog.Fatalf("Failed listing service with supplied self link '%v': %v", svc.SelfLink, err) } svcList, err := services.List(labels.Everything()) if err != nil { glog.Fatalf("Failed listing services: %v", err) } err = c.Get().RequestURI(svcList.SelfLink).Do().Into(svcList) if err != nil { glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err) } found := false for i := range svcList.Items { item := &svcList.Items[i] if item.Name != "selflinktest" { continue } found = true err = c.Get().RequestURI(item.SelfLink).Do().Into(svc) if err != nil { glog.Fatalf("Failed listing service with supplied self link '%v': %v", item.SelfLink, err) } break } if !found { glog.Fatalf("never found selflinktest service in namespace %s", namespace) } glog.Infof("Self link test passed in namespace %s", namespace) // TODO: Should test PUT at some point, too. }
By("retrieving the pod") pod, err := podClient.Get(pod.Name) if err != nil { Failf("Failed to get pod %s: %v", pod.Name, err) } // Try to find results for each expected name. By("looking for the results for each expected name") var failed []string for try := 1; try < 100; try++ { failed = []string{} for _, name := range namesToResolve { _, err := c.Get(). Prefix("proxy"). Resource("pods"). Namespace(api.NamespaceDefault). Name(pod.Name). Suffix("results", name). Do().Raw() if err != nil { failed = append(failed, name) fmt.Printf("Lookup using %s for %s failed: %v\n", pod.Name, name, err) } } if len(failed) == 0 { break } fmt.Printf("lookups using %s failed for: %v\n", pod.Name, failed) time.Sleep(10 * time.Second) } Expect(len(failed)).To(Equal(0))
// Grab its logs. Get host first. clientPodStatus, err := c.Pods(api.NamespaceDefault).Get(clientPod.Name) if err != nil { Fail(fmt.Sprintf("Failed to get clientPod to know host: %v", err)) } By(fmt.Sprintf("Trying to get logs from host %s pod %s container %s: %v", clientPodStatus.Status.Host, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name, err)) var logs []byte start := time.Now() // Sometimes the actual containers take a second to get started, try to get logs for 60s for time.Now().Sub(start) < (60 * time.Second) { logs, err = c.Get(). Prefix("proxy"). Resource("minions"). Name(clientPodStatus.Status.Host). Suffix("containerLogs", api.NamespaceDefault, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name). Do(). Raw() fmt.Sprintf("clientPod logs:%v\n", string(logs)) By(fmt.Sprintf("clientPod logs:%v\n", string(logs))) if strings.Contains(string(logs), "Internal Error") { By(fmt.Sprintf("Failed to get logs from host %s pod %s container %s: %v", clientPodStatus.Status.Host, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name, string(logs))) time.Sleep(5 * time.Second) continue } break } toFind := []string{
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging. func ClusterLevelLoggingWithElasticsearch(c *client.Client) { // TODO: For now assume we are only testing cluster logging with Elasticsearch // on GCE. Once we are sure that Elasticsearch cluster level logging // works for other providers we should widen the scope of this test. if !providerIs("gce") { Logf("Skipping cluster level logging test for provider %s", testContext.Provider) return } // Check for the existence of the Elasticsearch service. By("Checking the Elasticsearch service exists.") s := c.Services(api.NamespaceDefault) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. var err error const graceTime = 10 * time.Minute for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { if _, err = s.Get("elasticsearch-logging"); err == nil { break } Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) } Expect(err).NotTo(HaveOccurred()) // Wait for the Elasticsearch pods to enter the running state. By("Checking to make sure the Elasticsearch pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "elasticsearch-logging"})) pods, err := c.Pods(api.NamespaceDefault).List(label, fields.Everything()) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { err = waitForPodRunning(c, pod.Name) Expect(err).NotTo(HaveOccurred()) } By("Checking to make sure we are talking to an Elasticsearch service.") // Perform a few checks to make sure this looks like an Elasticsearch cluster. var statusCode float64 var esResponse map[string]interface{} err = nil for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { // Query against the root URL for Elasticsearch. body, err := c.Get(). Namespace(api.NamespaceDefault). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). DoRaw() if err != nil { Logf("After %v proxy call to elasticsearch-logging failed: %v", time.Since(start), err) continue } esResponse, err = bodyToJSON(body) if err != nil { Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err) continue } statusIntf, ok := esResponse["status"] if !ok { Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse) continue } statusCode, ok = statusIntf.(float64) if !ok { // Assume this is a string returning Failure. Retry. Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf) continue } break } Expect(err).NotTo(HaveOccurred()) if int(statusCode) != 200 { Failf("Elasticsearch cluster has a bad status: %v", statusCode) } // Check to see if we have a cluster_name field. clusterName, ok := esResponse["cluster_name"] if !ok { Failf("No cluster_name field in Elasticsearch response: %v", esResponse) } if clusterName != "kubernetes_logging" { Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName) } // Now assume we really are talking to an Elasticsearch instance. // Check the cluster health. By("Checking health of Elasticsearch service.") body, err := c.Get(). Namespace(api.NamespaceDefault). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). Suffix("_cluster/health"). Param("health", "pretty"). 
DoRaw() Expect(err).NotTo(HaveOccurred()) health, err := bodyToJSON(body) Expect(err).NotTo(HaveOccurred()) statusIntf, ok := health["status"] if !ok { Failf("No status field found in cluster health response: %v", health) } status := statusIntf.(string) if status != "green" && status != "yellow" { Failf("Cluster health has bad status: %s", status) } // Obtain a list of nodes so we can place one synthetic logger on each node. nodes, err := c.Nodes().List(labels.Everything(), fields.Everything()) if err != nil { Failf("Failed to list nodes: %v", err) } nodeCount := len(nodes.Items) if nodeCount == 0 { Failf("Failed to find any nodes") } // Create a unique root name for the resources in this test to permit // parallel executions of this test. // Use a unique namespace for the resources created in this test. ns := "es-logging-" + randomSuffix() name := "synthlogger" // Form a unique name to taint log lines to be collected. // Replace '-' characters with '_' to prevent the analyzer from breaking apart names. taintName := strings.Replace(ns+name, "-", "_", -1) // podNames records the names of the synthetic logging pods that are created in the // loop below. var podNames []string // countTo is the number of log lines emitted (and checked) for each synthetic logging pod. const countTo = 100 // Instantiate a synthetic logger pod on each node. for i, node := range nodes.Items { podName := fmt.Sprintf("%s-%d", name, i) _, err := c.Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{"name": name}, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "synth-logger", Image: "gcr.io/google_containers/ubuntu:14.04", Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$(($i+1)); done", countTo, i, taintName, podName)}, }, }, Host: node.Name, RestartPolicy: api.RestartPolicyNever, }, }) Expect(err).NotTo(HaveOccurred()) podNames = append(podNames, podName) } // Cleanup the pods when we are done. defer func() { for _, pod := range podNames { if err = c.Pods(ns).Delete(pod); err != nil { Logf("Failed to delete pod %s: %v", pod, err) } } }() // Wait for the synthetic logging pods to finish. By("Waiting for the pods to succeed.") for _, pod := range podNames { err = waitForPodSuccessInNamespace(c, pod, "synth-logger", ns) Expect(err).NotTo(HaveOccurred()) } // Wait a bit for the log information to make it into Elasticsearch. time.Sleep(30 * time.Second) // Make several attempts to observe the logs ingested into Elasticsearch. By("Checking all the log lines were ingested into Elasticsearch") missing := 0 expected := nodeCount * countTo for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) { // Ask Elasticsearch to return all the log lines that were tagged with the underscore // version of the name. Ask for twice as many log lines as we expect to check for // duplication bugs. body, err = c.Get(). Namespace(api.NamespaceDefault). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). Suffix("_search"). Param("q", fmt.Sprintf("log:%s", taintName)). Param("size", strconv.Itoa(2*expected)). 
DoRaw() if err != nil { Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err) continue } response, err := bodyToJSON(body) if err != nil { Logf("After %v failed to unmarshal response: %v", time.Since(start), err) continue } hits, ok := response["hits"].(map[string]interface{}) if !ok { Failf("response[hits] not of the expected type: %T", response["hits"]) } totalF, ok := hits["total"].(float64) if !ok { Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"]) continue } total := int(totalF) if total < expected { Logf("After %v expecting to find %d log lines but saw only %d", time.Since(start), expected, total) continue } h, ok := hits["hits"].([]interface{}) if !ok { Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"]) continue } // Initialize data-structure for observing counts. observed := make([][]int, nodeCount) for i := range observed { observed[i] = make([]int, countTo) } // Iterate over the hits and populate the observed array. for _, e := range h { l, ok := e.(map[string]interface{}) if !ok { Failf("element of hit not of expected type: %T", e) } source, ok := l["_source"].(map[string]interface{}) if !ok { Failf("_source not of the expected type: %T", l["_source"]) } msg, ok := source["log"].(string) if !ok { Failf("log not of the expected type: %T", source["log"]) } words := strings.Split(msg, " ") if len(words) < 4 { Failf("Malformed log line: %s", msg) } n, err := strconv.ParseUint(words[0], 10, 0) if err != nil { Failf("Expecting number of node as first field of %s", msg) } if n < 0 || int(n) >= nodeCount { Failf("Node count index out of range: %d", nodeCount) } index, err := strconv.ParseUint(words[2], 10, 0) if err != nil { Failf("Expecting number as third field of %s", msg) } if index < 0 || index >= countTo { Failf("Index value out of range: %d", index) } // Record the observation of a log line from node n at the given index. observed[n][index]++ } // Make sure we correctly observed the expected log lines from each node. missing = 0 for n := range observed { for i, c := range observed[n] { if c == 0 { missing++ } if c < 0 || c > 1 { Failf("Got incorrect count for node %d index %d: %d", n, i, c) } } } if missing != 0 { Logf("After %v still missing %d log lines", time.Since(start), missing) continue } Logf("After %s found all %d log lines", time.Since(start), expected) return } Failf("Failed to find all %d log lines", expected) }
func runAtomicPutTest(c *client.Client) { var svc api.Service err := c.Post().Path("services").Body( api.Service{ JSONBase: api.JSONBase{ID: "atomicservice", APIVersion: "v1beta1"}, Port: 12345, Labels: map[string]string{ "name": "atomicService", }, // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, }, ).Do().Into(&svc) if err != nil { glog.Fatalf("Failed creating atomicService: %v", err) } glog.Info("Created atomicService") testLabels := labels.Set{ "foo": "bar", } for i := 0; i < 5; i++ { // a: z, b: y, etc... testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)}) } var wg sync.WaitGroup wg.Add(len(testLabels)) for label, value := range testLabels { go func(l, v string) { for { glog.Infof("Starting to update (%s, %s)", l, v) var tmpSvc api.Service err := c.Get(). Path("services"). Path(svc.ID). PollPeriod(100 * time.Millisecond). Do(). Into(&tmpSvc) if err != nil { glog.Errorf("Error getting atomicService: %v", err) continue } if tmpSvc.Selector == nil { tmpSvc.Selector = map[string]string{l: v} } else { tmpSvc.Selector[l] = v } glog.Infof("Posting update (%s, %s)", l, v) err = c.Put().Path("services").Path(svc.ID).Body(&tmpSvc).Do().Error() if err != nil { if se, ok := err.(*client.StatusErr); ok { if se.Status.Code == http.StatusConflict { glog.Infof("Conflict: (%s, %s)", l, v) // This is what we expect. continue } } glog.Errorf("Unexpected error putting atomicService: %v", err) continue } break } glog.Infof("Done update (%s, %s)", l, v) wg.Done() }(label, value) } wg.Wait() if err := c.Get().Path("services").Path(svc.ID).Do().Into(&svc); err != nil { glog.Fatalf("Failed getting atomicService after writers are complete: %v", err) } if !reflect.DeepEqual(testLabels, labels.Set(svc.Selector)) { glog.Fatalf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, svc.Selector) } glog.Info("Atomic PUTs work.") }
// TestPodHasServiceEnvVars checks that a pod created after a service exists sees that service's environment variables. func TestPodHasServiceEnvVars(c *client.Client) bool { // Make a pod that will be a service. // This pod serves its hostname via HTTP. serverPod := parsePodOrDie(`{ "kind": "Pod", "apiVersion": "v1beta1", "id": "srv", "desiredState": { "manifest": { "version": "v1beta1", "id": "srv", "containers": [{ "name": "srv", "image": "kubernetes/serve_hostname", "ports": [{ "containerPort": 80, "hostPort": 8080 }] }] } }, "labels": { "name": "srv" } }`) _, err := c.Pods(api.NamespaceDefault).Create(serverPod) if err != nil { glog.Errorf("Failed to create serverPod: %v", err) return false } defer c.Pods(api.NamespaceDefault).Delete(serverPod.Name) waitForPodRunning(c, serverPod.Name) // This service exposes the server pod's port 8080 as a service on port 8765 svc := parseServiceOrDie(`{ "id": "fooservice", "kind": "Service", "apiVersion": "v1beta1", "port": 8765, "containerPort": 8080, "selector": { "name": "srv" } }`) time.Sleep(2 * time.Second) _, err = c.Services(api.NamespaceDefault).Create(svc) if err != nil { glog.Errorf("Failed to create service: %v", err) return false } defer c.Services(api.NamespaceDefault).Delete(svc.Name) // TODO: we don't have a way to wait for a service to be "running". // If this proves flaky, then we will need to retry the clientPod or insert a sleep. // Make a client pod that verifies that it has the service environment variables. clientPod := parsePodOrDie(`{ "apiVersion": "v1beta1", "kind": "Pod", "id": "env3", "desiredState": { "manifest": { "version": "v1beta1", "id": "env3", "restartPolicy": { "never": {} }, "containers": [{ "name": "env3cont", "image": "busybox", "command": ["sh", "-c", "env"] }] } }, "labels": { "name": "env3" } }`) _, err = c.Pods(api.NamespaceDefault).Create(clientPod) if err != nil { glog.Errorf("Failed to create pod: %v", err) return false } defer c.Pods(api.NamespaceDefault).Delete(clientPod.Name) // Wait for client pod to complete. success := waitForPodSuccess(c, clientPod.Name, clientPod.Spec.Containers[0].Name) if !success { glog.Errorf("Failed to run client pod to detect service env vars.") } // Grab its logs. Get host first. clientPodStatus, err := c.Pods(api.NamespaceDefault).Get(clientPod.Name) if err != nil { glog.Errorf("Failed to get clientPod to know host: %v", err) return false } glog.Infof("Trying to get logs from host %s pod %s container %s: %v", clientPodStatus.Status.Host, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name, err) logs, err := c.Get(). Prefix("proxy"). Resource("minions"). Name(clientPodStatus.Status.Host). Suffix("containerLogs", api.NamespaceDefault, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name). Do(). Raw() if err != nil { glog.Errorf("Failed to get logs from host %s pod %s container %s: %v", clientPodStatus.Status.Host, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name, err) return false } glog.Info("clientPod logs:", string(logs)) toFind := []string{ "FOOSERVICE_SERVICE_HOST=", "FOOSERVICE_SERVICE_PORT=", "FOOSERVICE_PORT=", "FOOSERVICE_PORT_8765_TCP_PORT=", "FOOSERVICE_PORT_8765_TCP_PROTO=", "FOOSERVICE_PORT_8765_TCP=", "FOOSERVICE_PORT_8765_TCP_ADDR=", } for _, m := range toFind { if !strings.Contains(string(logs), m) { glog.Errorf("Unable to find env var %q in client env vars.", m) success = false } } // We could try a wget against the service from the client pod. 
// But services.sh e2e test covers that pretty well.
return success }
rcReaper, err := kubectl.ReaperFor("ReplicationController", c) if err != nil { Fail(fmt.Sprintf("unable to stop rc %v: %v", rc.Name, err)) } if _, err = rcReaper.Stop(ns, rc.Name); err != nil { Fail(fmt.Sprintf("unable to stop rc %v: %v", rc.Name, err)) } }() By("Waiting for connectivity to be verified") const maxAttempts = 60 passed := false var body []byte for i := 0; i < maxAttempts && !passed; i++ { time.Sleep(2 * time.Second) body, err = c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("status").Do().Raw() if err != nil { fmt.Printf("Attempt %v/%v: service/pod still starting. (error: '%v')\n", i, maxAttempts, err) continue } switch string(body) { case "pass": fmt.Printf("Passed on attempt %v. Cleaning up.\n", i) passed = true break case "running": fmt.Printf("Attempt %v/%v: test still running\n", i, maxAttempts) break case "fail": if body, err = c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil { Fail(fmt.Sprintf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err))
func TestNetwork(c *client.Client) bool { ns := api.NamespaceDefault svc, err := c.Services(ns).Create(loadObjectOrDie(assetPath( "contrib", "for-tests", "network-tester", "service.json", )).(*api.Service)) if err != nil { glog.Errorf("unable to create test service: %v", err) return false } // Clean up service defer func() { if err = c.Services(ns).Delete(svc.Name); err != nil { glog.Errorf("unable to delete svc %v: %v", svc.Name, err) } }() rc, err := c.ReplicationControllers(ns).Create(loadObjectOrDie(assetPath( "contrib", "for-tests", "network-tester", "rc.json", )).(*api.ReplicationController)) if err != nil { glog.Errorf("unable to create test rc: %v", err) return false } // Clean up rc defer func() { rc.Spec.Replicas = 0 rc, err = c.ReplicationControllers(ns).Update(rc) if err != nil { glog.Errorf("unable to modify replica count for rc %v: %v", rc.Name, err) return } if err = c.ReplicationControllers(ns).Delete(rc.Name); err != nil { glog.Errorf("unable to delete rc %v: %v", rc.Name, err) } }() const maxAttempts = 60 for i := 0; i < maxAttempts; i++ { time.Sleep(time.Second) body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("status").Do().Raw() if err != nil { glog.Infof("Attempt %v/%v: service/pod still starting. (error: '%v')", i, maxAttempts, err) continue } switch string(body) { case "pass": glog.Infof("Passed on attempt %v. Cleaning up.", i) return true case "running": glog.Infof("Attempt %v/%v: test still running", i, maxAttempts) case "fail": if body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil { glog.Infof("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err) } else { glog.Infof("Failed on attempt %v. Cleaning up. Details:\n%v", i, string(body)) } return false } } if body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil { glog.Infof("Timed out. Cleaning up. Error reading details: %v", err) } else { glog.Infof("Timed out. Cleaning up. Details:\n%v", string(body)) } return false }
// TestClusterDNS checks that cluster DNS works. func TestClusterDNS(c *client.Client) bool { // TODO: // https://github.com/GoogleCloudPlatform/kubernetes/issues/3305 // (but even if it's fixed, this will need a version check for // skewed version tests) if os.Getenv("KUBERNETES_PROVIDER") == "gke" { glog.Infof("skipping TestClusterDNS on gke") return true } podClient := c.Pods(api.NamespaceDefault) //TODO: Wait for skyDNS // All the names we need to be able to resolve. namesToResolve := []string{ "kubernetes-ro", "kubernetes-ro.default", "kubernetes-ro.default.kubernetes.local", "google.com", } probeCmd := "for i in `seq 1 600`; do " for _, name := range namesToResolve { probeCmd += fmt.Sprintf("wget -O /dev/null %s && echo OK > /results/%s;", name, name) } probeCmd += "sleep 1; done" // Run a pod which probes DNS and exposes the results by HTTP. pod := &api.Pod{ TypeMeta: api.TypeMeta{ Kind: "Pod", APIVersion: "v1beta1", }, ObjectMeta: api.ObjectMeta{ Name: "dns-test", }, Spec: api.PodSpec{ Volumes: []api.Volume{ { Name: "results", Source: &api.VolumeSource{ EmptyDir: &api.EmptyDir{}, }, }, }, Containers: []api.Container{ { Name: "webserver", Image: "kubernetes/test-webserver", VolumeMounts: []api.VolumeMount{ { Name: "results", MountPath: "/results", }, }, }, { Name: "pinger", Image: "busybox", Command: []string{"sh", "-c", probeCmd}, VolumeMounts: []api.VolumeMount{ { Name: "results", MountPath: "/results", }, }, }, }, }, } _, err := podClient.Create(pod) if err != nil { glog.Errorf("Failed to create dns-test pod: %v", err) return false } defer podClient.Delete(pod.Name) waitForPodRunning(c, pod.Name) pod, err = podClient.Get(pod.Name) if err != nil { glog.Errorf("Failed to get pod: %v", err) return false } // Try to find results for each expected name. var failed []string for try := 1; try < 100; try++ { failed = []string{} for _, name := range namesToResolve { _, err := c.Get(). Prefix("proxy"). Resource("pods"). Namespace("default"). Name(pod.Name). Suffix("results", name). Do().Raw() if err != nil { failed = append(failed, name) } } if len(failed) == 0 { break } glog.Infof("lookups failed for: %v", failed) time.Sleep(3 * time.Second) } if len(failed) != 0 { glog.Errorf("DNS failed for: %v", failed) return false } // TODO: probe from the host, too. glog.Info("DNS probes succeeded") return true }
func runSelfLinkTestOnNamespace(c *client.Client, namespace string) { var svc api.Service err := c.Post(). NamespaceIfScoped(namespace, len(namespace) > 0). Resource("services").Body( &api.Service{ ObjectMeta: api.ObjectMeta{ Name: "selflinktest", Namespace: namespace, Labels: map[string]string{ "name": "selflinktest", }, }, Spec: api.ServiceSpec{ Port: 12345, // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, Protocol: "TCP", SessionAffinity: "None", }, }, ).Do().Into(&svc) if err != nil { glog.Fatalf("Failed creating selflinktest service: %v", err) } // TODO: this is not namespace aware err = c.Get().RequestURI(svc.SelfLink).Do().Into(&svc) if err != nil { glog.Fatalf("Failed listing service with supplied self link '%v': %v", svc.SelfLink, err) } var svcList api.ServiceList err = c.Get().NamespaceIfScoped(namespace, len(namespace) > 0).Resource("services").Do().Into(&svcList) if err != nil { glog.Fatalf("Failed listing services: %v", err) } err = c.Get().RequestURI(svcList.SelfLink).Do().Into(&svcList) if err != nil { glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err) } found := false for i := range svcList.Items { item := &svcList.Items[i] if item.Name != "selflinktest" { continue } found = true err = c.Get().RequestURI(item.SelfLink).Do().Into(&svc) if err != nil { glog.Fatalf("Failed listing service with supplied self link '%v': %v", item.SelfLink, err) } break } if !found { glog.Fatalf("never found selflinktest service in namespace %s", namespace) } glog.Infof("Self link test passed in namespace %s", namespace) // TODO: Should test PUT at some point, too. }
func runPatchTest(c *client.Client) { name := "patchservice" resource := "services" var svc api.Service err := c.Post().Resource(resource).Body( &api.Service{ TypeMeta: api.TypeMeta{ APIVersion: latest.Version, }, ObjectMeta: api.ObjectMeta{ Name: name, Labels: map[string]string{ "name": name, }, }, Spec: api.ServiceSpec{ Port: 12345, // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, Protocol: "TCP", SessionAffinity: "None", }, }, ).Do().Into(&svc) if err != nil { glog.Fatalf("Failed creating patchservice: %v", err) } if len(svc.Labels) != 1 { glog.Fatalf("Original length does not equal one") } // add label _, err = c.Patch().Resource(resource).Name(name).Body([]byte("{\"labels\":{\"foo\":\"bar\"}}")).Do().Get() if err != nil { glog.Fatalf("Failed updating patchservice: %v", err) } err = c.Get().Resource(resource).Name(name).Do().Into(&svc) if err != nil { glog.Fatalf("Failed getting patchservice: %v", err) } if len(svc.Labels) != 2 || svc.Labels["foo"] != "bar" { glog.Fatalf("Failed updating patchservice, labels are: %v", svc.Labels) } // remove one label _, err = c.Patch().Resource(resource).Name(name).Body([]byte("{\"labels\":{\"name\":null}}")).Do().Get() if err != nil { glog.Fatalf("Failed updating patchservice: %v", err) } err = c.Get().Resource(resource).Name(name).Do().Into(&svc) if err != nil { glog.Fatalf("Failed getting patchservice: %v", err) } if len(svc.Labels) != 1 || svc.Labels["foo"] != "bar" { glog.Fatalf("Failed updating patchservice, labels are: %v", svc.Labels) } // remove all labels _, err = c.Patch().Resource(resource).Name(name).Body([]byte("{\"labels\":null}")).Do().Get() if err != nil { glog.Fatalf("Failed updating patchservice: %v", err) } err = c.Get().Resource(resource).Name(name).Do().Into(&svc) if err != nil { glog.Fatalf("Failed getting patchservice: %v", err) } if svc.Labels != nil { glog.Fatalf("Failed remove all labels from patchservice: %v", svc.Labels) } glog.Info("PATCHs work.") }
func testPreStop(c *client.Client, ns string) { // This is the server that will receive the preStop notification podDescr := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "server", }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "server", Image: "gcr.io/google_containers/nettest:1.6", Ports: []api.ContainerPort{{ContainerPort: 8080}}, }, }, }, } By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) _, err := c.Pods(ns).Create(podDescr) expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. defer func() { By("Deleting the server pod") c.Pods(ns).Delete(podDescr.Name, nil) }() By("Waiting for pods to come up.") err = waitForPodRunningInNamespace(c, podDescr.Name, ns) expectNoError(err, "waiting for server pod to start") val := "{\"Source\": \"prestop\"}" podOut, err := c.Pods(ns).Get(podDescr.Name) expectNoError(err, "getting pod info") preStopDescr := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "tester", }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "tester", Image: "gcr.io/google_containers/busybox", Command: []string{"sleep", "600"}, Lifecycle: &api.Lifecycle{ PreStop: &api.Handler{ Exec: &api.ExecAction{ Command: []string{ "wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP), }, }, }, }, }, }, }, } By(fmt.Sprintf("Creating tester pod %s in namespace %s", podDescr.Name, ns)) _, err = c.Pods(ns).Create(preStopDescr) expectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name)) deletePreStop := true // At the end of the test, clean up by removing the pod. defer func() { if deletePreStop { By("Deleting the tester pod") c.Pods(ns).Delete(preStopDescr.Name, nil) } }() err = waitForPodRunningInNamespace(c, preStopDescr.Name, ns) expectNoError(err, "waiting for tester pod to start") // Delete the pod with the preStop handler. By("Deleting pre-stop pod") if err := c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil { deletePreStop = false } expectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name)) // Validate that the server received the web poke. err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) { if body, err := c.Get(). Namespace(ns).Prefix("proxy"). Resource("pods"). Name(podDescr.Name). Suffix("read"). DoRaw(); err != nil { By(fmt.Sprintf("Error validating prestop: %v", err)) } else { Logf("Saw: %s", string(body)) state := State{} err := json.Unmarshal(body, &state) if err != nil { Logf("Error parsing: %v", err) return false, nil } if state.Received["prestop"] != 0 { return true, nil } } return false, nil }) expectNoError(err, "validating pre-stop.") }
func runAtomicPutTest(c *client.Client) { var svc api.Service err := c.Post().Resource("services").Body( &api.Service{ TypeMeta: api.TypeMeta{ APIVersion: latest.Version, }, ObjectMeta: api.ObjectMeta{ Name: "atomicservice", Labels: map[string]string{ "name": "atomicService", }, }, Spec: api.ServiceSpec{ Port: 12345, // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, }, }, ).Do().Into(&svc) if err != nil { glog.Fatalf("Failed creating atomicService: %v", err) } glog.Info("Created atomicService") testLabels := labels.Set{ "foo": "bar", } for i := 0; i < 5; i++ { // a: z, b: y, etc... testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)}) } var wg sync.WaitGroup wg.Add(len(testLabels)) for label, value := range testLabels { go func(l, v string) { for { glog.Infof("Starting to update (%s, %s)", l, v) var tmpSvc api.Service err := c.Get(). Resource("services"). Name(svc.Name). Do(). Into(&tmpSvc) if err != nil { glog.Errorf("Error getting atomicService: %v", err) continue } if tmpSvc.Spec.Selector == nil { tmpSvc.Spec.Selector = map[string]string{l: v} } else { tmpSvc.Spec.Selector[l] = v } glog.Infof("Posting update (%s, %s)", l, v) err = c.Put().Resource("services").Name(svc.Name).Body(&tmpSvc).Do().Error() if err != nil { if errors.IsConflict(err) { glog.Infof("Conflict: (%s, %s)", l, v) // This is what we expect. continue } glog.Errorf("Unexpected error putting atomicService: %v", err) continue } break } glog.Infof("Done update (%s, %s)", l, v) wg.Done() }(label, value) } wg.Wait() if err := c.Get().Resource("services").Name(svc.Name).Do().Into(&svc); err != nil { glog.Fatalf("Failed getting atomicService after writers are complete: %v", err) } if !reflect.DeepEqual(testLabels, labels.Set(svc.Spec.Selector)) { glog.Fatalf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, svc.Spec.Selector) } glog.Info("Atomic PUTs work.") }
// Start a client pod using given VolumeSource (exported by startVolumeServer()) // and check that the pod sees the data from the server pod. func testVolumeClient(client *client.Client, config VolumeTestConfig, volume api.VolumeSource, expectedContent string) { By(fmt.Sprint("starting ", config.prefix, " client")) podClient := client.Pods(config.namespace) clientPod := &api.Pod{ TypeMeta: api.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: api.ObjectMeta{ Name: config.prefix + "-client", Labels: map[string]string{ "role": config.prefix + "-client", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: config.prefix + "-client", Image: "gcr.io/google_containers/nginx:1.7.9", Ports: []api.ContainerPort{ { Name: "web", ContainerPort: 80, Protocol: api.ProtocolTCP, }, }, VolumeMounts: []api.VolumeMount{ { Name: config.prefix + "-volume", MountPath: "/usr/share/nginx/html", }, }, }, }, Volumes: []api.Volume{ { Name: config.prefix + "-volume", VolumeSource: volume, }, }, }, } if _, err := podClient.Create(clientPod); err != nil { Failf("Failed to create %s pod: %v", clientPod.Name, err) } expectNoError(waitForPodRunningInNamespace(client, clientPod.Name, config.namespace)) By("reading a web page from the client") body, err := client.Get(). Namespace(config.namespace). Prefix("proxy"). Resource("pods"). Name(clientPod.Name). DoRaw() expectNoError(err, "Cannot read web page: %v", err) Logf("body: %v", string(body)) By("checking the page content") Expect(body).To(ContainSubstring(expectedContent)) }
func runMasterServiceTest(client *client.Client) { time.Sleep(12 * time.Second) var svcList api.ServiceList err := client.Get(). Namespace("default"). Resource("services"). Do(). Into(&svcList) if err != nil { glog.Fatalf("unexpected error listing services: %v", err) } var foundRW, foundRO bool found := util.StringSet{} for i := range svcList.Items { found.Insert(svcList.Items[i].Name) if svcList.Items[i].Name == "kubernetes" { foundRW = true } if svcList.Items[i].Name == "kubernetes-ro" { foundRO = true } } if foundRW { var ep api.Endpoints err := client.Get(). Namespace("default"). Resource("endpoints"). Name("kubernetes"). Do(). Into(&ep) if err != nil { glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err) } if len(ep.Endpoints) == 0 { glog.Fatalf("no endpoints for kubernetes service: %v", ep) } } else { glog.Errorf("no RW service found: %v", found) } if foundRO { var ep api.Endpoints err := client.Get(). Namespace("default"). Resource("endpoints"). Name("kubernetes-ro"). Do(). Into(&ep) if err != nil { glog.Fatalf("unexpected error listing endpoints for kubernetes-ro service: %v", err) } if len(ep.Endpoints) == 0 { glog.Fatalf("no endpoints for kubernetes-ro service: %v", ep) } } else { glog.Errorf("no RO service found: %v", found) } if !foundRW || !foundRO { glog.Fatalf("Kubernetes service test failed: %v", found) } glog.Infof("Master service test passed.") }
// A basic test to check the deployment of an image using // a replication controller. The image serves its hostname // which is checked for each replica. func ServeImageOrFail(c *client.Client, test string, image string) { ns := api.NamespaceDefault name := "my-hostname-" + test + "-" + string(util.NewUUID()) replicas := 2 // Create a replication controller for a service // that serves its hostname. // The source for the Docker container kubernetes/serve_hostname is // in contrib/for-demos/serve_hostname By(fmt.Sprintf("Creating replication controller %s", name)) controller, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Name: name, }, Spec: api.ReplicationControllerSpec{ Replicas: replicas, Selector: map[string]string{ "name": name, }, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ Labels: map[string]string{"name": name}, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: name, Image: image, Ports: []api.ContainerPort{{ContainerPort: 9376}}, }, }, }, }, }, }) Expect(err).NotTo(HaveOccurred()) // Cleanup the replication controller when we are done. defer func() { // Resize the replication controller to zero to get rid of pods. By("Cleaning up the replication controller") rcReaper, err := kubectl.ReaperFor("ReplicationController", c) if err != nil { Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) } if _, err = rcReaper.Stop(ns, controller.Name); err != nil { Logf("Failed to stop replication controller %v: %v.", controller.Name, err) } }() // List the pods, making sure we observe all the replicas. listTimeout := time.Minute label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) pods, err := c.Pods(ns).List(label) Expect(err).NotTo(HaveOccurred()) t := time.Now() for { Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas) if len(pods.Items) == replicas { break } if time.Since(t) > listTimeout { Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds", name, replicas, len(pods.Items), time.Since(t).Seconds()) } time.Sleep(5 * time.Second) pods, err = c.Pods(ns).List(label) Expect(err).NotTo(HaveOccurred()) } By("Ensuring each pod is running and has a hostIP") // Wait for the pods to enter the running state. Waiting loops until the pods // are running so non-running pods cause a timeout for this test. for _, pod := range pods.Items { err = waitForPodRunning(c, pod.Name) Expect(err).NotTo(HaveOccurred()) } // Try to make sure we get a hostIP for each pod. hostIPTimeout := 2 * time.Minute t = time.Now() for i, pod := range pods.Items { for { p, err := c.Pods(ns).Get(pod.Name) Expect(err).NotTo(HaveOccurred()) if p.Status.HostIP != "" { Logf("Controller %s: Replica %d has hostIP: %s", name, i+1, p.Status.HostIP) break } if time.Since(t) >= hostIPTimeout { Failf("Controller %s: Gave up waiting for hostIP of replica %d after %v seconds", name, i, time.Since(t).Seconds()) } Logf("Controller %s: Retrying to get the hostIP of replica %d", name, i+1) time.Sleep(5 * time.Second) } } // Re-fetch the pod information to update the host port information. pods, err = c.Pods(ns).List(label) Expect(err).NotTo(HaveOccurred()) // Verify that something is listening. By("Trying to dial each unique pod") for i, pod := range pods.Items { body, err := c.Get(). Prefix("proxy"). Resource("pods"). Name(string(pod.Name)). Do(). 
Raw() if err != nil { Failf("Controller %s: Failed to GET from replica %d: %v", name, i+1, err) } // The body should be the pod name. if string(body) != pod.Name { Failf("Controller %s: Replica %d expected response %s but got %s", name, i+1, pod.Name, string(body)) } Logf("Controller %s: Got expected result from replica %d: %s", name, i+1, string(body)) } }