func watchOnce(etcdClient *etcd.Client, kubeClient *kclient.Client) { // Start the goroutine to produce update events. updates := make(chan serviceUpdate) startWatching(kubeClient.Services(kapi.NamespaceAll), updates) // This loop will break if the channel closes, which is how the // goroutine signals an error. for ev := range updates { if *verbose { log.Printf("Received update event: %#v", ev) } switch ev.Op { case SetServices, AddService: for i := range ev.Services { s := &ev.Services[i] name := buildNameString(s.Name, s.Namespace, *domain) mutateEtcdOrDie(func() error { return addDNS(name, s, etcdClient) }) } case RemoveService: for i := range ev.Services { s := &ev.Services[i] name := buildNameString(s.Name, s.Namespace, *domain) mutateEtcdOrDie(func() error { return removeDNS(name, etcdClient) }) } } } //TODO: fully resync periodically. }
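// For context, hedged sketches of the two helpers the loop above relies on.
// These are assumptions about their behavior, not the actual implementations:
// buildNameString is assumed to join the service name, namespace, and domain
// into a fully qualified DNS name, and mutateEtcdOrDie is assumed to retry a
// failing etcd mutation a few times before aborting the process.
func buildNameString(service, namespace, domain string) string {
	return fmt.Sprintf("%s.%s.%s.", service, namespace, domain)
}

func mutateEtcdOrDie(mutator func() error) {
	const maxAttempts = 3
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = mutator(); err == nil {
			return
		}
		time.Sleep(time.Duration(attempt) * time.Second)
	}
	log.Fatalf("Unable to mutate etcd after %d attempts: %v", maxAttempts, err)
}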
// DescriberFor returns the default describe functions for each of the standard // Kubernetes types. func DescriberFor(kind string, c *client.Client) (Describer, bool) { switch kind { case "Pod": return &PodDescriber{ PodClient: func(namespace string) (client.PodInterface, error) { return c.Pods(namespace), nil }, ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) { return c.ReplicationControllers(namespace), nil }, }, true case "ReplicationController": return &ReplicationControllerDescriber{ PodClient: func(namespace string) (client.PodInterface, error) { return c.Pods(namespace), nil }, ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) { return c.ReplicationControllers(namespace), nil }, }, true case "Service": return &ServiceDescriber{ ServiceClient: func(namespace string) (client.ServiceInterface, error) { return c.Services(namespace), nil }, }, true } return nil, false }
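// Hypothetical usage of DescriberFor: look up a describer for a kind and
// render a human-readable description. The Describe(namespace, name)
// signature is an assumption for this sketch.
func describeResource(c *client.Client, kind, namespace, name string) (string, error) {
	describer, ok := DescriberFor(kind, c)
	if !ok {
		return "", fmt.Errorf("no describer registered for kind %q", kind)
	}
	return describer.Describe(namespace, name)
}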
// Creates a replication controller that serves its hostname and a service on top of it. func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) { podNames := make([]string, replicas) _, err := c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: name, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Port: port, TargetPort: util.NewIntOrStringFromInt(9376), Protocol: "TCP", }}, Selector: map[string]string{ "name": name, }, }, }) if err != nil { return podNames, "", err } var createdPods []*api.Pod maxContainerFailures := 0 config := RCConfig{ Client: c, Image: "gcr.io/google_containers/serve_hostname:1.1", Name: name, Namespace: ns, PollInterval: 3 * time.Second, Timeout: 30 * time.Second, Replicas: replicas, CreatedPods: &createdPods, MaxContainerFailures: &maxContainerFailures, } err = RunRC(config) if err != nil { return podNames, "", err } if len(createdPods) != replicas { return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods)) } for i := range createdPods { podNames[i] = createdPods[i].ObjectMeta.Name } sort.StringSlice(podNames).Sort() service, err := c.Services(ns).Get(name) if err != nil { return podNames, "", err } if service.Spec.ClusterIP == "" { return podNames, "", fmt.Errorf("Service IP is blank for %v", name) } serviceIP := service.Spec.ClusterIP return podNames, serviceIP, nil }
func stopServeHostnameService(c *client.Client, ns, name string) error { if err := DeleteRC(c, ns, name); err != nil { return err } if err := c.Services(ns).Delete(name); err != nil { return err } return nil }
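// Illustrative wiring of the two helpers above in a test: start the service,
// use the returned pod names and service IP, and always clean up. The
// namespace, name, port, and replica count are example values.
func exampleServeHostname(c *client.Client) error {
	ns, name := "e2e-tests-services", "serve-hostname"
	podNames, serviceIP, err := startServeHostnameService(c, ns, name, 80, 3)
	if err != nil {
		return err
	}
	defer func() {
		if err := stopServeHostnameService(c, ns, name); err != nil {
			Logf("Failed to stop service %s: %v", name, err)
		}
	}()
	Logf("Service %s has cluster IP %s backed by pods %v", name, serviceIP, podNames)
	return nil
}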
func runMasterServiceTest(client *client.Client) { time.Sleep(12 * time.Second) svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything()) if err != nil { glog.Fatalf("unexpected error listing services: %v", err) } var foundRW bool found := util.StringSet{} for i := range svcList.Items { found.Insert(svcList.Items[i].Name) if svcList.Items[i].Name == "kubernetes" { foundRW = true } } if foundRW { ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes") if err != nil { glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err) } if countEndpoints(ep) == 0 { glog.Fatalf("no endpoints for kubernetes service: %v", ep) } } else { glog.Errorf("no RW service found: %v", found) glog.Fatal("Kubernetes service test failed") } glog.Infof("Master service test passed.") }
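// A sketch of the countEndpoints helper used above, under the assumption that
// it totals the addresses across all subsets of the endpoints object.
func countEndpoints(ep *api.Endpoints) int {
	count := 0
	for i := range ep.Subsets {
		count += len(ep.Subsets[i].Addresses)
	}
	return count
}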
func expectedServicesExist(c *client.Client) error { serviceList, err := c.Services(api.NamespaceDefault).List(labels.Everything()) if err != nil { return err } for _, service := range serviceList.Items { if _, ok := expectedServices[service.Name]; ok { expectedServices[service.Name] = true } } for service, found := range expectedServices { if !found { return fmt.Errorf("Service %q not found", service) } } return nil }
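// The expectedServices map consumed above is assumed to be a package-level
// set of service names mapped to "seen" flags; the entry here is
// illustrative only.
var expectedServices = map[string]bool{
	"kubernetes": false,
}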
func waitForPublicIPs(c *client.Client, serviceName, namespace string) (*api.Service, error) { const timeout = 4 * time.Minute var service *api.Service var err error By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have a public IP", timeout, serviceName, namespace)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { service, err = c.Services(namespace).Get(serviceName) if err != nil { Logf("Get service failed, ignoring for 5s: %v", err) continue } if len(service.Spec.PublicIPs) > 0 { return service, nil } Logf("Waiting for service %s in namespace %s to have a public IP (%v)", serviceName, namespace, time.Since(start)) } return service, fmt.Errorf("service %s in namespace %s doesn't have a public IP after %.2f seconds", serviceName, namespace, timeout.Seconds()) }
func waitForLoadBalancerDestroy(c *client.Client, serviceName, namespace string) (*api.Service, error) { // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable const timeout = 10 * time.Minute var service *api.Service var err error By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to have no LoadBalancer ingress points", timeout, serviceName, namespace)) for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { service, err = c.Services(namespace).Get(serviceName) if err != nil { Logf("Get service failed, ignoring for 5s: %v", err) continue } if len(service.Status.LoadBalancer.Ingress) == 0 { return service, nil } Logf("Waiting for service %s in namespace %s to have no LoadBalancer ingress points (%v)", serviceName, namespace, time.Since(start)) } return service, fmt.Errorf("service %s in namespace %s still has LoadBalancer ingress points after %.2f seconds", serviceName, namespace, timeout.Seconds()) }
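// The two wait loops above share the same shape; a hedged sketch of a common
// helper they could be factored into. The 5s poll interval matches the loops
// above; everything else is an assumption.
func waitForServiceCondition(c *client.Client, namespace, name string, timeout time.Duration, done func(*api.Service) bool) (*api.Service, error) {
	var service *api.Service
	var err error
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		service, err = c.Services(namespace).Get(name)
		if err != nil {
			Logf("Get service failed, ignoring for 5s: %v", err)
			continue
		}
		if done(service) {
			return service, nil
		}
	}
	return service, fmt.Errorf("service %s in namespace %s did not reach the desired condition after %.2f seconds", name, namespace, timeout.Seconds())
}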
// updateService fetches a service, calls the update function on it, // and then attempts to send the updated service. It retries up to 2 // times in the face of timeouts and conflicts. func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) { var service *api.Service var err error for i := 0; i < 3; i++ { service, err = c.Services(namespace).Get(serviceName) if err != nil { return service, err } update(service) service, err = c.Services(namespace).Update(service) if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { return service, err } } return service, err }
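// Hypothetical use of updateService: set a label on a service. Because the
// update callback can run more than once on conflict, it must be idempotent,
// as this one is. The label key and value are illustrative.
func labelService(c *client.Client, namespace, serviceName, key, value string) (*api.Service, error) {
	return updateService(c, namespace, serviceName, func(s *api.Service) {
		if s.Labels == nil {
			s.Labels = map[string]string{}
		}
		s.Labels[key] = value
	})
}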
// Cleans up both the server and client pods, and the server's service. func volumeTestCleanup(client *client.Client, config VolumeTestConfig) { By(fmt.Sprint("cleaning the environment after ", config.prefix)) defer GinkgoRecover() podClient := client.Pods(config.namespace) serviceClient := client.Services(config.namespace) // Ignore all errors; the pods may not even have been created. podClient.Delete(config.prefix+"-client", nil) serviceClient.Delete(config.prefix + "-server") podClient.Delete(config.prefix+"-server", nil) }
waitForServiceInAddonTest(c, namespace.Name, "addon-test", false) waitForReplicationControllerInAddonTest(c, defaultNsName, "addon-test-v1", false) By("remove manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2)) waitForServiceInAddonTest(c, namespace.Name, "addon-test-updated", false) waitForReplicationControllerInAddonTest(c, namespace.Name, "addon-test-v2", false) By("verify invalid API addons weren't created") _, err = c.ReplicationControllers(namespace.Name).Get("invalid-addon-test-v1") Expect(err).To(HaveOccurred()) _, err = c.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1") Expect(err).To(HaveOccurred()) _, err = c.Services(namespace.Name).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) _, err = c.Services(defaultNsName).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) // invalid addons will be deleted by the deferred function }) }) func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) { expectNoError(waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } func waitForReplicationControllerInAddonTest(c *client.Client, addonNamespace, name string, exist bool) { expectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) }
} } Expect(foundRW).To(Equal(true)) Expect(foundRO).To(Equal(true)) }) It("should serve a basic endpoint from pods", func(done Done) { serviceName := "endpoint-test2" ns := api.NamespaceDefault labels := map[string]string{ "foo": "bar", "baz": "blah", } defer func() { err := c.Services(ns).Delete(serviceName) Expect(err).NotTo(HaveOccurred()) }() service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, }, Spec: api.ServiceSpec{ Selector: labels, Ports: []api.ServicePort{{ Port: 80, TargetPort: util.NewIntOrStringFromInt(80), }}, }, }
func newSVCByName(c *client.Client, ns, name string) error { _, err := c.Services(ns).Create(svcByName(name)) return err }
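// A minimal sketch of the svcByName helper used above, assuming it builds a
// service whose selector matches pods labeled with the same name; the port
// numbers are illustrative.
func svcByName(name string) *api.Service {
	return &api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"name": name},
			Ports: []api.ServicePort{{
				Port:       9376,
				TargetPort: util.NewIntOrStringFromInt(9376),
			}},
		},
	}
}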
Name: svcName, Labels: map[string]string{ "name": svcName, }, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Port: 8765, TargetPort: util.NewIntOrStringFromInt(8080), }}, Selector: map[string]string{ "name": serverName, }, }, } defer c.Services(api.NamespaceDefault).Delete(svc.Name) _, err = c.Services(api.NamespaceDefault).Create(svc) if err != nil { Fail(fmt.Sprintf("Failed to create service: %v", err)) } // Make a client pod that verifies that it has the service environment variables. podName := "client-envvars-" + string(util.NewUUID()) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{"name": podName}, }, Spec: api.PodSpec{ Containers: []api.Container{ {
if len(ipToPort) != 1 { Logf("No IP found, retrying") continue } for _, port := range ipToPort { if port[0] != redisPort { Failf("Wrong endpoint port: %d", port[0]) } } endpointFound = true break } if !endpointFound { Failf("1 endpoint is expected") } service, err := c.Services(ns).Get(name) Expect(err).NotTo(HaveOccurred()) if len(service.Spec.Ports) != 1 { Failf("1 port is expected") } port := service.Spec.Ports[0] if port.Port != servicePort { Failf("Wrong service port: %d", port.Port) } if port.TargetPort.IntVal != redisPort { Failf("Wrong target port: %d", port.TargetPort.IntVal) } } By("exposing RC")
Fail(fmt.Sprintf("unexpected error code. expected 200, got: %v (%v)", resp.StatusCode, resp)) } ns := api.NamespaceDefault // TODO(satnam6502): Replace call of randomSuffix with call to NewUUID when service // names have the same form as pod and replication controller names. name := "nettest-" + randomSuffix() svc, err := c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: name, Labels: map[string]string{ "name": name, }, }, Spec: api.ServiceSpec{ Port: 8080, ContainerPort: util.NewIntOrStringFromInt(8080), Selector: map[string]string{ "name": name, }, }, }) By(fmt.Sprintf("Creating service with name %s", svc.Name)) if err != nil { Fail(fmt.Sprintf("unable to create test service %s: %v", svc.Name, err)) } // Clean up service defer func() { defer GinkgoRecover() By("Cleaning up the service")
// TestPodHasServiceEnvVars checks that a client pod sees the environment variables for a service that was created before the pod started. func TestPodHasServiceEnvVars(c *client.Client) bool { // Make a pod that will be a service. // This pod serves its hostname via HTTP. serverPod := parsePodOrDie(`{ "kind": "Pod", "apiVersion": "v1beta1", "id": "srv", "desiredState": { "manifest": { "version": "v1beta1", "id": "srv", "containers": [{ "name": "srv", "image": "kubernetes/serve_hostname", "ports": [{ "containerPort": 80, "hostPort": 8080 }] }] } }, "labels": { "name": "srv" } }`) _, err := c.Pods(api.NamespaceDefault).Create(serverPod) if err != nil { glog.Errorf("Failed to create serverPod: %v", err) return false } defer c.Pods(api.NamespaceDefault).Delete(serverPod.Name) waitForPodRunning(c, serverPod.Name) // This service exposes the server pod's port 8080 as a service on port 8765 svc := parseServiceOrDie(`{ "id": "fooservice", "kind": "Service", "apiVersion": "v1beta1", "port": 8765, "containerPort": 8080, "selector": { "name": "srv" } }`) time.Sleep(2 * time.Second) _, err = c.Services(api.NamespaceDefault).Create(svc) if err != nil { glog.Errorf("Failed to create service: %v", err) return false } defer c.Services(api.NamespaceDefault).Delete(svc.Name) // TODO: we don't have a way to wait for a service to be "running". // If this proves flaky, then we will need to retry the clientPod or insert a sleep. // Make a client pod that verifies that it has the service environment variables. clientPod := parsePodOrDie(`{ "apiVersion": "v1beta1", "kind": "Pod", "id": "env3", "desiredState": { "manifest": { "version": "v1beta1", "id": "env3", "restartPolicy": { "never": {} }, "containers": [{ "name": "env3cont", "image": "busybox", "command": ["sh", "-c", "env"] }] } }, "labels": { "name": "env3" } }`) _, err = c.Pods(api.NamespaceDefault).Create(clientPod) if err != nil { glog.Errorf("Failed to create pod: %v", err) return false } defer c.Pods(api.NamespaceDefault).Delete(clientPod.Name) // Wait for client pod to complete. success := waitForPodSuccess(c, clientPod.Name, clientPod.Spec.Containers[0].Name) if !success { glog.Errorf("Failed to run client pod to detect service env vars.") } // Grab its logs. Get host first. clientPodStatus, err := c.Pods(api.NamespaceDefault).Get(clientPod.Name) if err != nil { glog.Errorf("Failed to get clientPod to know host: %v", err) return false } glog.Infof("Trying to get logs from host %s pod %s container %s", clientPodStatus.Status.Host, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name) logs, err := c.Get(). Prefix("proxy"). Resource("minions"). Name(clientPodStatus.Status.Host). Suffix("containerLogs", api.NamespaceDefault, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name). Do(). Raw() if err != nil { glog.Errorf("Failed to get logs from host %s pod %s container %s: %v", clientPodStatus.Status.Host, clientPodStatus.Name, clientPodStatus.Spec.Containers[0].Name, err) return false } glog.Info("clientPod logs:", string(logs)) toFind := []string{ "FOOSERVICE_SERVICE_HOST=", "FOOSERVICE_SERVICE_PORT=", "FOOSERVICE_PORT=", "FOOSERVICE_PORT_8765_TCP_PORT=", "FOOSERVICE_PORT_8765_TCP_PROTO=", "FOOSERVICE_PORT_8765_TCP=", "FOOSERVICE_PORT_8765_TCP_ADDR=", } for _, m := range toFind { if !strings.Contains(string(logs), m) { glog.Errorf("Unable to find env var %q in client env vars.", m) success = false } } // We could try to wget the service from the client pod, but the services.sh e2e test covers that pretty well. return success }
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging. func ClusterLevelLoggingWithElasticsearch(c *client.Client) { // TODO: For now assume we are only testing cluster logging with Elasticsearch // on GCE. Once we are sure that Elasticsearch cluster level logging // works for other providers we should widen the scope of this test. if !providerIs("gce") { Logf("Skipping cluster level logging test for provider %s", testContext.Provider) return } // Check for the existence of the Elasticsearch service. By("Checking the Elasticsearch service exists.") s := c.Services(api.NamespaceDefault) // Make a few attempts to connect. This makes the test robust against // being run as the first e2e test just after the e2e cluster has been created. var err error const graceTime = 10 * time.Minute for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { if _, err = s.Get("elasticsearch-logging"); err == nil { break } Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start)) } Expect(err).NotTo(HaveOccurred()) // Wait for the Elasticsearch pods to enter the running state. By("Checking to make sure the Elasticsearch pods are running") label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "elasticsearch-logging"})) pods, err := c.Pods(api.NamespaceDefault).List(label, fields.Everything()) Expect(err).NotTo(HaveOccurred()) for _, pod := range pods.Items { err = waitForPodRunning(c, pod.Name) Expect(err).NotTo(HaveOccurred()) } By("Checking to make sure we are talking to an Elasticsearch service.") // Perform a few checks to make sure this looks like an Elasticsearch cluster. var statusCode float64 var esResponse map[string]interface{} err = nil for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) { // Query against the root URL for Elasticsearch. body, err := c.Get(). Namespace(api.NamespaceDefault). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). DoRaw() if err != nil { Logf("After %v proxy call to elasticsearch-logging failed: %v", time.Since(start), err) continue } esResponse, err = bodyToJSON(body) if err != nil { Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err) continue } statusIntf, ok := esResponse["status"] if !ok { Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse) continue } statusCode, ok = statusIntf.(float64) if !ok { // Assume this is a string returning Failure. Retry. Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf) continue } break } Expect(err).NotTo(HaveOccurred()) if int(statusCode) != 200 { Failf("Elasticsearch cluster has a bad status: %v", statusCode) } // Check to see if we have a cluster_name field. clusterName, ok := esResponse["cluster_name"] if !ok { Failf("No cluster_name field in Elasticsearch response: %v", esResponse) } if clusterName != "kubernetes_logging" { Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName) } // Now assume we really are talking to an Elasticsearch instance. // Check the cluster health. By("Checking health of Elasticsearch service.") body, err := c.Get(). Namespace(api.NamespaceDefault). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). Suffix("_cluster/health"). Param("health", "pretty").
DoRaw() Expect(err).NotTo(HaveOccurred()) health, err := bodyToJSON(body) Expect(err).NotTo(HaveOccurred()) statusIntf, ok := health["status"] if !ok { Failf("No status field found in cluster health response: %v", health) } status := statusIntf.(string) if status != "green" && status != "yellow" { Failf("Cluster health has bad status: %s", status) } // Obtain a list of nodes so we can place one synthetic logger on each node. nodes, err := c.Nodes().List(labels.Everything(), fields.Everything()) if err != nil { Failf("Failed to list nodes: %v", err) } nodeCount := len(nodes.Items) if nodeCount == 0 { Failf("Failed to find any nodes") } // Create a unique root name for the resources in this test to permit // parallel executions of this test. // Use a unique namespace for the resources created in this test. ns := "es-logging-" + randomSuffix() name := "synthlogger" // Form a unique name to taint log lines to be collected. // Replace '-' characters with '_' to prevent the analyzer from breaking apart names. taintName := strings.Replace(ns+name, "-", "_", -1) // podNames records the names of the synthetic logging pods that are created in the // loop below. var podNames []string // countTo is the number of log lines emitted (and checked) for each synthetic logging pod. const countTo = 100 // Instantiate a synthetic logger pod on each node. for i, node := range nodes.Items { podName := fmt.Sprintf("%s-%d", name, i) _, err := c.Pods(ns).Create(&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: podName, Labels: map[string]string{"name": name}, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "synth-logger", Image: "gcr.io/google_containers/ubuntu:14.04", Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$(($i+1)); done", countTo, i, taintName, podName)}, }, }, Host: node.Name, RestartPolicy: api.RestartPolicyNever, }, }) Expect(err).NotTo(HaveOccurred()) podNames = append(podNames, podName) } // Cleanup the pods when we are done. defer func() { for _, pod := range podNames { if err = c.Pods(ns).Delete(pod); err != nil { Logf("Failed to delete pod %s: %v", pod, err) } } }() // Wait for the synthetic logging pods to finish. By("Waiting for the pods to succeed.") for _, pod := range podNames { err = waitForPodSuccessInNamespace(c, pod, "synth-logger", ns) Expect(err).NotTo(HaveOccurred()) } // Wait a bit for the log information to make it into Elasticsearch. time.Sleep(30 * time.Second) // Make several attempts to observe the logs ingested into Elasticsearch. By("Checking all the log lines were ingested into Elasticsearch") missing := 0 expected := nodeCount * countTo for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) { // Ask Elasticsearch to return all the log lines that were tagged with the underscore // version of the name. Ask for twice as many log lines as we expect to check for // duplication bugs. body, err = c.Get(). Namespace(api.NamespaceDefault). Prefix("proxy"). Resource("services"). Name("elasticsearch-logging"). Suffix("_search"). Param("q", fmt.Sprintf("log:%s", taintName)). Param("size", strconv.Itoa(2*expected)).
DoRaw() if err != nil { Logf("After %v failed to make proxy call to elasticsearch-logging: %v", time.Since(start), err) continue } response, err := bodyToJSON(body) if err != nil { Logf("After %v failed to unmarshal response: %v", time.Since(start), err) continue } hits, ok := response["hits"].(map[string]interface{}) if !ok { Failf("response[hits] not of the expected type: %T", response["hits"]) } totalF, ok := hits["total"].(float64) if !ok { Logf("After %v hits[total] not of the expected type: %T", time.Since(start), hits["total"]) continue } total := int(totalF) if total < expected { Logf("After %v expecting to find %d log lines but saw only %d", time.Since(start), expected, total) continue } h, ok := hits["hits"].([]interface{}) if !ok { Logf("After %v hits not of the expected type: %T", time.Since(start), hits["hits"]) continue } // Initialize data-structure for observing counts. observed := make([][]int, nodeCount) for i := range observed { observed[i] = make([]int, countTo) } // Iterate over the hits and populate the observed array. for _, e := range h { l, ok := e.(map[string]interface{}) if !ok { Failf("element of hit not of expected type: %T", e) } source, ok := l["_source"].(map[string]interface{}) if !ok { Failf("_source not of the expected type: %T", l["_source"]) } msg, ok := source["log"].(string) if !ok { Failf("log not of the expected type: %T", source["log"]) } words := strings.Split(msg, " ") if len(words) < 4 { Failf("Malformed log line: %s", msg) } n, err := strconv.ParseUint(words[0], 10, 0) if err != nil { Failf("Expecting node number as first field of %s", msg) } if int(n) >= nodeCount { Failf("Node index out of range: %d", n) } index, err := strconv.ParseUint(words[2], 10, 0) if err != nil { Failf("Expecting number as third field of %s", msg) } if index >= countTo { Failf("Index value out of range: %d", index) } // Record the observation of a log line from node n at the given index. observed[n][index]++ } // Make sure we correctly observed the expected log lines from each node. missing = 0 for n := range observed { for i, c := range observed[n] { if c == 0 { missing++ } if c > 1 { Failf("Got incorrect count for node %d index %d: %d", n, i, c) } } } if missing != 0 { Logf("After %v still missing %d log lines", time.Since(start), missing) continue } Logf("After %s found all %d log lines", time.Since(start), expected) return } Failf("Failed to find all %d log lines", expected) }
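// A minimal sketch of the bodyToJSON helper used throughout this test,
// assuming it simply unmarshals the raw response body into a generic map
// (requires encoding/json).
func bodyToJSON(body []byte) (map[string]interface{}, error) {
	var r map[string]interface{}
	if err := json.Unmarshal(body, &r); err != nil {
		return nil, err
	}
	return r, nil
}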
func runServiceTest(client *client.Client) { pod := api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ "name": "thisisalonglabel", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "c1", Image: "foo", Ports: []api.Port{ {ContainerPort: 1234}, }, }, }, }, Status: api.PodStatus{ PodIP: "1.2.3.4", }, } _, err := client.Pods(api.NamespaceDefault).Create(&pod) if err != nil { glog.Fatalf("Failed to create pod: %v, %v", pod, err) } if err := wait.Poll(time.Second, time.Second*20, podExists(client, pod.Namespace, pod.Name)); err != nil { glog.Fatalf("FAILED: pod never started running %v", err) } svc1 := api.Service{ ObjectMeta: api.ObjectMeta{Name: "service1"}, Spec: api.ServiceSpec{ Selector: map[string]string{ "name": "thisisalonglabel", }, Port: 8080, }, } _, err = client.Services(api.NamespaceDefault).Create(&svc1) if err != nil { glog.Fatalf("Failed to create service: %v, %v", svc1, err) } if err := wait.Poll(time.Second, time.Second*20, endpointsSet(client, svc1.Namespace, svc1.Name, 1)); err != nil { glog.Fatalf("FAILED: unexpected endpoints: %v", err) } // A second service with the same port. svc2 := api.Service{ ObjectMeta: api.ObjectMeta{Name: "service2"}, Spec: api.ServiceSpec{ Selector: map[string]string{ "name": "thisisalonglabel", }, Port: 8080, }, } _, err = client.Services(api.NamespaceDefault).Create(&svc2) if err != nil { glog.Fatalf("Failed to create service: %v, %v", svc2, err) } if err := wait.Poll(time.Second, time.Second*20, endpointsSet(client, svc2.Namespace, svc2.Name, 1)); err != nil { glog.Fatalf("FAILED: unexpected endpoints: %v", err) } glog.Info("Service test passed.") }
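// A sketch of the podExists condition polled above, assuming it reports
// success once a Get on the pod succeeds (requires pkg/util/wait).
func podExists(c *client.Client, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		_, err := c.Pods(namespace).Get(name)
		return err == nil, nil
	}
}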
func runPatchTest(c *client.Client) { name := "patchservice" resource := "services" svcBody := api.Service{ TypeMeta: api.TypeMeta{ APIVersion: c.APIVersion(), }, ObjectMeta: api.ObjectMeta{ Name: name, Labels: map[string]string{}, }, Spec: api.ServiceSpec{ // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, Ports: []api.ServicePort{{ Port: 12345, Protocol: "TCP", }}, SessionAffinity: "None", }, } services := c.Services(api.NamespaceDefault) svc, err := services.Create(&svcBody) if err != nil { glog.Fatalf("Failed creating patchservice: %v", err) } patchBodies := map[string]map[api.PatchType]struct { AddLabelBody []byte RemoveLabelBody []byte RemoveAllLabelsBody []byte }{ "v1": { api.JSONPatchType: { []byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`), []byte(`[{"op":"remove","path":"/metadata/labels/foo"}]`), []byte(`[{"op":"remove","path":"/metadata/labels"}]`), }, api.MergePatchType: { []byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`), []byte(`{"metadata":{"labels":{"foo":null}}}`), []byte(`{"metadata":{"labels":null}}`), }, api.StrategicMergePatchType: { []byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`), []byte(`{"metadata":{"labels":{"foo":null}}}`), []byte(`{"metadata":{"labels":{"$patch":"replace"}}}`), }, }, } pb := patchBodies[c.APIVersion()] execPatch := func(pt api.PatchType, body []byte) error { return c.Patch(pt). Resource(resource). Namespace(api.NamespaceDefault). Name(name). Body(body). Do(). Error() } for k, v := range pb { // add label err := execPatch(k, v.AddLabelBody) if err != nil { glog.Fatalf("Failed updating patchservice with patch type %s: %v", k, err) } svc, err = services.Get(name) if err != nil { glog.Fatalf("Failed getting patchservice: %v", err) } if len(svc.Labels) != 2 || svc.Labels["foo"] != "bar" || svc.Labels["baz"] != "qux" { glog.Fatalf("Failed updating patchservice with patch type %s: labels are: %v", k, svc.Labels) } // remove one label err = execPatch(k, v.RemoveLabelBody) if err != nil { glog.Fatalf("Failed updating patchservice with patch type %s: %v", k, err) } svc, err = services.Get(name) if err != nil { glog.Fatalf("Failed getting patchservice: %v", err) } if len(svc.Labels) != 1 || svc.Labels["baz"] != "qux" { glog.Fatalf("Failed updating patchservice with patch type %s: labels are: %v", k, svc.Labels) } // remove all labels err = execPatch(k, v.RemoveAllLabelsBody) if err != nil { glog.Fatalf("Failed updating patchservice with patch type %s: %v", k, err) } svc, err = services.Get(name) if err != nil { glog.Fatalf("Failed getting patchservice: %v", err) } if svc.Labels != nil { glog.Fatalf("Failed removing all labels from patchservice with patch type %s: %v", k, svc.Labels) } } glog.Info("PATCHes work.") }
func runAtomicPutTest(c *client.Client) { svcBody := api.Service{ TypeMeta: api.TypeMeta{ APIVersion: c.APIVersion(), }, ObjectMeta: api.ObjectMeta{ Name: "atomicservice", Labels: map[string]string{ "name": "atomicService", }, }, Spec: api.ServiceSpec{ // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, Ports: []api.ServicePort{{ Port: 12345, Protocol: "TCP", }}, SessionAffinity: "None", }, } services := c.Services(api.NamespaceDefault) svc, err := services.Create(&svcBody) if err != nil { glog.Fatalf("Failed creating atomicService: %v", err) } glog.Info("Created atomicService") testLabels := labels.Set{ "foo": "bar", } for i := 0; i < 5; i++ { // a: z, b: y, etc... testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)}) } var wg sync.WaitGroup wg.Add(len(testLabels)) for label, value := range testLabels { go func(l, v string) { for { glog.Infof("Starting to update (%s, %s)", l, v) tmpSvc, err := services.Get(svc.Name) if err != nil { glog.Errorf("Error getting atomicService: %v", err) continue } if tmpSvc.Spec.Selector == nil { tmpSvc.Spec.Selector = map[string]string{l: v} } else { tmpSvc.Spec.Selector[l] = v } glog.Infof("Posting update (%s, %s)", l, v) tmpSvc, err = services.Update(tmpSvc) if err != nil { if apierrors.IsConflict(err) { glog.Infof("Conflict: (%s, %s)", l, v) // This is what we expect. continue } glog.Errorf("Unexpected error putting atomicService: %v", err) continue } break } glog.Infof("Done update (%s, %s)", l, v) wg.Done() }(label, value) } wg.Wait() svc, err = services.Get(svc.Name) if err != nil { glog.Fatalf("Failed getting atomicService after writers are complete: %v", err) } if !reflect.DeepEqual(testLabels, labels.Set(svc.Spec.Selector)) { glog.Fatalf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, svc.Spec.Selector) } glog.Info("Atomic PUTs work.") }
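// The pattern exercised above is optimistic concurrency: each update carries
// the resourceVersion it was read at, and the apiserver rejects stale writes
// with a conflict. A hedged, generic sketch of the retry loop each goroutine
// runs:
func retryServiceUpdateOnConflict(services client.ServiceInterface, name string, mutate func(*api.Service)) (*api.Service, error) {
	for {
		svc, err := services.Get(name)
		if err != nil {
			return nil, err
		}
		mutate(svc)
		updated, err := services.Update(svc)
		if err == nil {
			return updated, nil
		}
		if !apierrors.IsConflict(err) {
			return nil, err
		}
		// Conflict: another writer won the race; re-read and try again.
	}
}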
func runSelfLinkTestOnNamespace(c *client.Client, namespace string) { svcBody := api.Service{ ObjectMeta: api.ObjectMeta{ Name: "selflinktest", Namespace: namespace, Labels: map[string]string{ "name": "selflinktest", }, }, Spec: api.ServiceSpec{ // This is here because validation requires it. Selector: map[string]string{ "foo": "bar", }, Ports: []api.ServicePort{{ Port: 12345, Protocol: "TCP", }}, SessionAffinity: "None", }, } services := c.Services(namespace) svc, err := services.Create(&svcBody) if err != nil { glog.Fatalf("Failed creating selflinktest service: %v", err) } err = c.Get().RequestURI(svc.SelfLink).Do().Into(svc) if err != nil { glog.Fatalf("Failed getting service with supplied self link '%v': %v", svc.SelfLink, err) } svcList, err := services.List(labels.Everything()) if err != nil { glog.Fatalf("Failed listing services: %v", err) } err = c.Get().RequestURI(svcList.SelfLink).Do().Into(svcList) if err != nil { glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err) } found := false for i := range svcList.Items { item := &svcList.Items[i] if item.Name != "selflinktest" { continue } found = true err = c.Get().RequestURI(item.SelfLink).Do().Into(svc) if err != nil { glog.Fatalf("Failed getting service with supplied self link '%v': %v", item.SelfLink, err) } break } if !found { glog.Fatalf("never found selflinktest service in namespace %s", namespace) } glog.Infof("Self link test passed in namespace %s", namespace) // TODO: Should test PUT at some point, too. }
// Starts a container specified by config.serverImage and exports all // config.serverPorts from it via a service. The function returns the IP // address of the service. func startVolumeServer(client *client.Client, config VolumeTestConfig) string { podClient := client.Pods(config.namespace) serviceClient := client.Services(config.namespace) portCount := len(config.serverPorts) serverPodPorts := make([]api.ContainerPort, portCount) servicePorts := make([]api.ServicePort, portCount) for i := 0; i < portCount; i++ { portName := fmt.Sprintf("%s-%d", config.prefix, i) serverPodPorts[i] = api.ContainerPort{ Name: portName, ContainerPort: config.serverPorts[i], Protocol: api.ProtocolTCP, } servicePorts[i] = api.ServicePort{ Name: portName, Protocol: "TCP", Port: config.serverPorts[i], TargetPort: util.NewIntOrStringFromInt(config.serverPorts[i]), } } By(fmt.Sprint("creating ", config.prefix, " server pod")) privileged := new(bool) *privileged = true serverPod := &api.Pod{ TypeMeta: api.TypeMeta{ Kind: "Pod", APIVersion: "v1", }, ObjectMeta: api.ObjectMeta{ Name: config.prefix + "-server", Labels: map[string]string{ "role": config.prefix + "-server", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: config.prefix + "-server", Image: config.serverImage, SecurityContext: &api.SecurityContext{ Privileged: privileged, }, Ports: serverPodPorts, }, }, }, } _, err := podClient.Create(serverPod) expectNoError(err, "Failed to create %s pod: %v", serverPod.Name, err) expectNoError(waitForPodRunningInNamespace(client, serverPod.Name, config.namespace)) By(fmt.Sprint("creating ", config.prefix, " service")) service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: config.prefix + "-server", }, Spec: api.ServiceSpec{ Selector: map[string]string{ "role": config.prefix + "-server", }, Ports: servicePorts, }, } createdService, err := serviceClient.Create(service) expectNoError(err, "Failed to create %s service: %v", service.Name, err) By("sleeping a bit to give the server time to start") time.Sleep(20 * time.Second) ip := createdService.Spec.ClusterIP return ip }
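// Illustrative pairing of startVolumeServer with the volumeTestCleanup helper
// defined earlier; the namespace, prefix, image, and port are assumptions for
// the example.
func exampleVolumeServer(c *client.Client) {
	config := VolumeTestConfig{
		namespace:   "e2e-tests-volume",
		prefix:      "nfs",
		serverImage: "gcr.io/google_containers/volume-nfs",
		serverPorts: []int{2049},
	}
	defer volumeTestCleanup(c, config)
	serverIP := startVolumeServer(c, config)
	Logf("Volume server is reachable at %v", serverIP)
}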
waitForReplicationControllerInAddonTest(c, "addon-test-v2", true) waitForServiceInAddonTest(c, "addon-test", false) waitForReplicationControllerInAddonTest(c, "addon-test-v1", false) By("remove manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2)) waitForServiceInAddonTest(c, "addon-test-updated", false) waitForReplicationControllerInAddonTest(c, "invalid-addon-test-v1", false) By("verify invalid API addons weren't created") _, err = c.ReplicationControllers(addonNamespace).Get("invalid-addon-test-v1") Expect(err).To(HaveOccurred()) _, err = c.Services(addonNamespace).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) // invalid addons will be deleted by the deferred function }) }) func waitForServiceInAddonTest(c *client.Client, name string, exist bool) { expectNoError(waitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } func waitForReplicationControllerInAddonTest(c *client.Client, name string, exist bool) { expectNoError(waitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout)) } // TODO marekbiskup 2015-06-11: merge the ssh code into pkg/util/ssh.go after
Expect(err).NotTo(HaveOccurred()) namespaces[i] = namespacePtr.Name } }) AfterEach(func() { for _, ns := range namespaces { By(fmt.Sprintf("Destroying namespace %v", ns)) if err := c.Namespaces().Delete(ns); err != nil { Failf("Couldn't delete namespace %s: %s", ns, err) } } }) // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here. It("should provide secure master service", func() { _, err := c.Services(api.NamespaceDefault).Get("kubernetes") Expect(err).NotTo(HaveOccurred()) }) It("should serve a basic endpoint from pods", func() { serviceName := "endpoint-test2" ns := namespaces[0] labels := map[string]string{ "foo": "bar", "baz": "blah", } defer func() { err := c.Services(ns).Delete(serviceName) Expect(err).NotTo(HaveOccurred()) }()
func runServiceTest(client *client.Client) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ "name": "thisisalonglabel", }, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "c1", Image: "foo", Ports: []api.ContainerPort{ {ContainerPort: 1234}, }, ImagePullPolicy: api.PullIfNotPresent, }, }, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, }, Status: api.PodStatus{ PodIP: "1.2.3.4", }, } pod, err := client.Pods(api.NamespaceDefault).Create(pod) if err != nil { glog.Fatalf("Failed to create pod: %v, %v", pod, err) } if err := wait.Poll(time.Second, time.Second*20, podExists(client, pod.Namespace, pod.Name)); err != nil { glog.Fatalf("FAILED: pod never started running %v", err) } svc1 := &api.Service{ ObjectMeta: api.ObjectMeta{Name: "service1"}, Spec: api.ServiceSpec{ Selector: map[string]string{ "name": "thisisalonglabel", }, Ports: []api.ServicePort{{ Port: 8080, Protocol: "TCP", }}, SessionAffinity: "None", }, } svc1, err = client.Services(api.NamespaceDefault).Create(svc1) if err != nil { glog.Fatalf("Failed to create service: %v, %v", svc1, err) } // create an identical service in the non-default namespace svc3 := &api.Service{ ObjectMeta: api.ObjectMeta{Name: "service1"}, Spec: api.ServiceSpec{ Selector: map[string]string{ "name": "thisisalonglabel", }, Ports: []api.ServicePort{{ Port: 8080, Protocol: "TCP", }}, SessionAffinity: "None", }, } svc3, err = client.Services("other").Create(svc3) if err != nil { glog.Fatalf("Failed to create service: %v, %v", svc3, err) } // TODO Reduce the timeouts in this test when endpoints controller is sped up. See #6045. if err := wait.Poll(time.Second, time.Second*60, endpointsSet(client, svc1.Namespace, svc1.Name, 1)); err != nil { glog.Fatalf("FAILED: unexpected endpoints: %v", err) } // A second service with the same port. svc2 := &api.Service{ ObjectMeta: api.ObjectMeta{Name: "service2"}, Spec: api.ServiceSpec{ Selector: map[string]string{ "name": "thisisalonglabel", }, Ports: []api.ServicePort{{ Port: 8080, Protocol: "TCP", }}, SessionAffinity: "None", }, } svc2, err = client.Services(api.NamespaceDefault).Create(svc2) if err != nil { glog.Fatalf("Failed to create service: %v, %v", svc2, err) } if err := wait.Poll(time.Second, time.Second*60, endpointsSet(client, svc2.Namespace, svc2.Name, 1)); err != nil { glog.Fatalf("FAILED: unexpected endpoints: %v", err) } if err := wait.Poll(time.Second, time.Second*60, endpointsSet(client, svc3.Namespace, svc3.Name, 0)); err != nil { glog.Fatalf("FAILED: service in other namespace should have no endpoints: %v", err) } svcList, err := client.Services(api.NamespaceAll).List(labels.Everything()) if err != nil { glog.Fatalf("Failed to list services across namespaces: %v", err) } names := util.NewStringSet() for _, svc := range svcList.Items { names.Insert(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name)) } if !names.HasAll("default/kubernetes", "default/service1", "default/service2", "other/service1") { glog.Fatalf("Unexpected service list: %#v", names) } glog.Info("Service test passed.") }
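// A sketch of the endpointsSet condition polled in both versions of
// runServiceTest above, assuming it counts the addresses across the subsets
// of the named endpoints object and compares against the expected total.
func endpointsSet(c *client.Client, namespace, name string, expected int) wait.ConditionFunc {
	return func() (bool, error) {
		ep, err := c.Endpoints(namespace).Get(name)
		if err != nil {
			// Treat transient errors as "not yet" and keep polling.
			return false, nil
		}
		count := 0
		for i := range ep.Subsets {
			count += len(ep.Subsets[i].Addresses)
		}
		return count == expected, nil
	}
}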
func TestNetwork(c *client.Client) bool { ns := api.NamespaceDefault svc, err := c.Services(ns).Create(loadObjectOrDie(assetPath( "contrib", "for-tests", "network-tester", "service.json", )).(*api.Service)) if err != nil { glog.Errorf("unable to create test service: %v", err) return false } // Clean up service defer func() { if err = c.Services(ns).Delete(svc.Name); err != nil { glog.Errorf("unable to delete svc %v: %v", svc.Name, err) } }() rc, err := c.ReplicationControllers(ns).Create(loadObjectOrDie(assetPath( "contrib", "for-tests", "network-tester", "rc.json", )).(*api.ReplicationController)) if err != nil { glog.Errorf("unable to create test rc: %v", err) return false } // Clean up rc defer func() { rc.Spec.Replicas = 0 rc, err = c.ReplicationControllers(ns).Update(rc) if err != nil { glog.Errorf("unable to modify replica count for rc %v: %v", rc.Name, err) return } if err = c.ReplicationControllers(ns).Delete(rc.Name); err != nil { glog.Errorf("unable to delete rc %v: %v", rc.Name, err) } }() const maxAttempts = 60 for i := 0; i < maxAttempts; i++ { time.Sleep(time.Second) body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("status").Do().Raw() if err != nil { glog.Infof("Attempt %v/%v: service/pod still starting. (error: '%v')", i, maxAttempts, err) continue } switch string(body) { case "pass": glog.Infof("Passed on attempt %v. Cleaning up.", i) return true case "running": glog.Infof("Attempt %v/%v: test still running", i, maxAttempts) case "fail": if body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil { glog.Infof("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err) } else { glog.Infof("Failed on attempt %v. Cleaning up. Details:\n%v", i, string(body)) } return false } } if body, err := c.Get().Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil { glog.Infof("Timed out. Cleaning up. Error reading details: %v", err) } else { glog.Infof("Timed out. Cleaning up. Details:\n%v", string(body)) } return false }
if testContext.Provider == "vagrant" { By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)") return } By(fmt.Sprintf("Creating a service named [%s] in namespace %s", svcname, namespace.Name)) svc, err := c.Services(namespace.Name).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: svcname, Labels: map[string]string{ "name": svcname, }, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Protocol: "TCP", Port: 8080, TargetPort: util.NewIntOrStringFromInt(8080), }}, Selector: map[string]string{ "name": svcname, }, }, }) if err != nil { Failf("unable to create test service named [%s] %v", svc.Name, err) } // Clean up service defer func() { defer GinkgoRecover()