// DescriberFor returns the default describe functions for each of the standard
// Kubernetes types.
func DescriberFor(kind string, c *client.Client) (Describer, bool) {
	switch kind {
	case "Pod":
		return &PodDescriber{
			PodClient: func(namespace string) (client.PodInterface, error) {
				return c.Pods(namespace), nil
			},
			ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) {
				return c.ReplicationControllers(namespace), nil
			},
		}, true
	case "ReplicationController":
		return &ReplicationControllerDescriber{
			PodClient: func(namespace string) (client.PodInterface, error) {
				return c.Pods(namespace), nil
			},
			ReplicationControllerClient: func(namespace string) (client.ReplicationControllerInterface, error) {
				return c.ReplicationControllers(namespace), nil
			},
		}, true
	case "Service":
		return &ServiceDescriber{
			ServiceClient: func(namespace string) (client.ServiceInterface, error) {
				return c.Services(namespace), nil
			},
		}, true
	}
	return nil, false
}
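
// Hedged usage sketch (not from the original source): how a caller might look up
// and run a describer. It assumes the Describer interface exposes a
// Describe(namespace, name string) (string, error) method and that `c` is an
// already-configured *client.Client; both are assumptions, not shown above.
func describePod(c *client.Client, namespace, name string) (string, error) {
	d, ok := DescriberFor("Pod", c)
	if !ok {
		return "", fmt.Errorf("no describer registered for kind %q", "Pod")
	}
	return d.Describe(namespace, name)
}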
func watchNodes(client *client.Client) {
	nodeList, err := client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		log.Fatal(err)
	}
	nodes := nodeList.Items
	writeNodeTargetsFile(nodes)

	watcher, err := client.Nodes().Watch(labels.Everything(), fields.Everything(), nodeList.ResourceVersion)
	if err != nil {
		log.Fatal(err)
	}

	for event := range watcher.ResultChan() {
		switch event.Type {
		case watch.Added:
			switch obj := event.Object.(type) {
			case *api.Node:
				nodes = append(nodes, *obj)
			}
			writeNodeTargetsFile(nodes)
		case watch.Deleted:
			switch obj := event.Object.(type) {
			case *api.Node:
				index := findNodeIndexInSlice(nodes, obj)
				nodes = append(nodes[:index], nodes[index+1:]...)
			}
			writeNodeTargetsFile(nodes)
		}
	}
}
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	expectedPods := []string{}
	for _, rc := range rcList.Items {
		if _, ok := expectedRcs[rc.Name]; ok {
			if rc.Status.Replicas != 1 {
				return nil, fmt.Errorf("expected to find only one replica for rc %q, found %d", rc.Name, rc.Status.Replicas)
			}
			expectedRcs[rc.Name] = true
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	for rc, found := range expectedRcs {
		if !found {
			return nil, fmt.Errorf("replication controller %q not found", rc)
		}
	}
	return expectedPods, nil
}
func runStaticPodTest(c *client.Client, configFilePath string) {
	manifest := `version: v1beta2
id: static-pod
containers:
  - name: static-container
    image: kubernetes/pause`

	manifestFile, err := ioutil.TempFile(configFilePath, "")
	if err != nil {
		glog.Fatalf("Failed to create temporary manifest file: %v", err)
	}
	defer os.Remove(manifestFile.Name())
	if err := ioutil.WriteFile(manifestFile.Name(), []byte(manifest), 0600); err != nil {
		glog.Fatalf("Failed to write static pod manifest: %v", err)
	}

	// Wait for the mirror pod to be created.
	hostname, _ := os.Hostname()
	podName := fmt.Sprintf("static-pod-%s", hostname)
	namespace := kubelet.NamespaceDefault
	if err := wait.Poll(time.Second, time.Second*30, podRunning(c, namespace, podName)); err != nil {
		glog.Fatalf("FAILED: mirror pod has not been created or is not running: %v", err)
	}

	// Delete the mirror pod, and wait for it to be recreated.
	c.Pods(namespace).Delete(podName)
	if err := wait.Poll(time.Second, time.Second*30, podRunning(c, namespace, podName)); err != nil {
		glog.Fatalf("FAILED: mirror pod has not been re-created or is not running: %v", err)
	}

	// Remove the manifest file, and wait for the mirror pod to be deleted.
	os.Remove(manifestFile.Name())
	if err := wait.Poll(time.Second, time.Second*30, podNotFound(c, namespace, podName)); err != nil {
		glog.Fatalf("FAILED: mirror pod has not been deleted: %v", err)
	}
}
// createNamespaceIfDoesNotExist ensures that the namespace with the specified name exists, or returns an error.
func createNamespaceIfDoesNotExist(c *client.Client, name string) (*api.Namespace, error) {
	namespace, err := c.Namespaces().Get(name)
	if err != nil {
		namespace, err = c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: name}})
	}
	return namespace, err
}
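
// Hedged usage sketch (assumption, not part of the original): ensure a test
// namespace exists before creating objects in it. The namespace name is
// illustrative.
func ensureTestNamespace(c *client.Client) error {
	ns, err := createNamespaceIfDoesNotExist(c, "integration-test")
	if err != nil {
		return fmt.Errorf("could not ensure namespace: %v", err)
	}
	glog.Infof("Using namespace %s", ns.Name)
	return nil
}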
func waitForClusterSize(c *client.Client, size int) error {
	timeout := 4 * time.Minute
	if providerIs("aws") {
		// AWS is not as fast as gce/gke at having nodes come online.
		timeout = 10 * time.Minute
	}
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
		if err != nil {
			Logf("Failed to list nodes: %v", err)
			continue
		}
		// Filter out not-ready nodes.
		filterNodes(nodes, func(node api.Node) bool {
			return isNodeReadySetAsExpected(&node, true)
		})
		if len(nodes.Items) == size {
			Logf("Cluster has reached the desired size %d", size)
			return nil
		}
		Logf("Waiting for cluster size %d, current size %d", size, len(nodes.Items))
	}
	return fmt.Errorf("timeout waiting for cluster size to be %d", size)
}
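
// Hedged usage sketch (assumption): an e2e-style caller that resizes a node
// group and then blocks until the cluster reports the expected ready-node count.
// resizeNodeGroup is a hypothetical helper; only waitForClusterSize is defined above.
func growClusterTo(c *client.Client, size int) error {
	if err := resizeNodeGroup(size); err != nil { // hypothetical helper
		return err
	}
	return waitForClusterSize(c, size)
}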
func runReplicationControllerTest(c *client.Client) {
	clientAPIVersion := c.APIVersion()
	data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	updated, err := c.ReplicationControllers("test").Create(&controller)
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods.
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// Poll till we can retrieve the status of all pods matching the given label selector from their minions.
	// This involves 3 operations:
	//   - The scheduler must assign all pods to a minion.
	//   - The assignment must be reflected in a `List` operation against the apiserver, for labels matching the selector.
	//   - We need to be able to query the kubelet on that minion for information about the pod.
	if err := wait.Poll(
		time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
func watchOnce(etcdClient *etcd.Client, kubeClient *kclient.Client) {
	// Start the goroutine to produce update events.
	updates := make(chan serviceUpdate)
	startWatching(kubeClient.Services(kapi.NamespaceAll), updates)

	// This loop will break if the channel closes, which is how the
	// goroutine signals an error.
	for ev := range updates {
		if *verbose {
			log.Printf("Received update event: %#v", ev)
		}
		switch ev.Op {
		case SetServices, AddService:
			for i := range ev.Services {
				s := &ev.Services[i]
				name := buildNameString(s.Name, s.Namespace, *domain)
				mutateEtcdOrDie(func() error { return addDNS(name, s, etcdClient) })
			}
		case RemoveService:
			for i := range ev.Services {
				s := &ev.Services[i]
				name := buildNameString(s.Name, s.Namespace, *domain)
				mutateEtcdOrDie(func() error { return removeDNS(name, etcdClient) })
			}
		}
	}
	// TODO: fully resync periodically.
}
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
	expectedPods := []string{}
	// Iterate over the labels that identify the replication controllers that we
	// want to check. rcLabels contains the values for the k8s-app key that
	// identify the replication controllers that we want to check. Using a label
	// rather than an explicit name is preferred because the names will typically
	// have a version suffix, e.g. heapster-monitoring-v1, and this will change
	// after a rolling update, e.g. to heapster-monitoring-v2. By using a label
	// query we can also catch the situation where a heapster-monitoring-v1 and a
	// heapster-monitoring-v2 replication controller are running at the same time
	// (which would be an error except during a rolling update).
	for _, rcLabel := range rcLabels {
		rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
		if err != nil {
			return nil, err
		}
		if len(rcList.Items) != 1 {
			return nil, fmt.Errorf("expected to find one replica for RC with label %s but got %d", rcLabel, len(rcList.Items))
		}
		for _, rc := range rcList.Items {
			podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
			if err != nil {
				return nil, err
			}
			for _, pod := range podList.Items {
				expectedPods = append(expectedPods, string(pod.UID))
			}
		}
	}
	return expectedPods, nil
}
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)

	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
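
// Hedged usage sketch (assumption): seeding a fixed number of pods on each of a
// set of hosts before a benchmark run. StartPods is defined above; the host
// names and pod count are illustrative.
func seedBenchmarkPods(restClient *client.Client) error {
	for _, host := range []string{"node-1", "node-2"} {
		if err := StartPods(30, host, restClient); err != nil {
			return err
		}
	}
	return nil
}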
func TestKubernetesROService(c *client.Client) bool {
	svc := api.ServiceList{}
	err := c.Get().
		Namespace("default").
		AbsPath("/api/v1beta1/proxy/services/kubernetes-ro/api/v1beta1/services").
		Do().
		Into(&svc)
	if err != nil {
		glog.Errorf("unexpected error listing services using ro service: %v", err)
		return false
	}
	var foundRW, foundRO bool
	for i := range svc.Items {
		if svc.Items[i].Name == "kubernetes" {
			foundRW = true
		}
		if svc.Items[i].Name == "kubernetes-ro" {
			foundRO = true
		}
	}
	if !foundRW {
		glog.Error("no RW service found")
	}
	if !foundRO {
		glog.Error("no RO service found")
	}
	if !foundRW || !foundRO {
		return false
	}
	return true
}
func runReplicationControllerTest(kubeClient *client.Client) {
	data, err := ioutil.ReadFile("api/examples/controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}
	var controllerRequest api.ReplicationController
	if err := json.Unmarshal(data, &controllerRequest); err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}

	glog.Infof("Creating replication controllers")
	if _, err := kubeClient.CreateReplicationController(controllerRequest); err != nil {
		glog.Fatalf("Unexpected error: %#v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods.
	time.Sleep(time.Second * 10)

	// Validate that they're truly up.
	pods, err := kubeClient.ListPods(labels.Set(controllerRequest.DesiredState.ReplicaSelector).AsSelector())
	if err != nil || len(pods.Items) != controllerRequest.DesiredState.Replicas {
		glog.Fatalf("FAILED: %#v", pods.Items)
	}
	glog.Infof("Replication controller produced:\n\n%#v\n\n", pods)
}
// waitForPodSuccess returns true if the pod reached state success, or false if it reached failure or ran too long.
func waitForPodSuccess(c *client.Client, podName string, contName string) bool {
	for i := 0; i < 10; i++ {
		if i > 0 {
			time.Sleep(5 * time.Second)
		}
		pod, err := c.Pods(api.NamespaceDefault).Get(podName)
		if err != nil {
			glog.Warningf("Get pod failed: %v", err)
			continue
		}
		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
		ci, ok := pod.Status.Info[contName]
		if !ok {
			glog.Infof("No Status.Info for container %s in pod %s yet", contName, podName)
		} else {
			if ci.State.Termination != nil {
				if ci.State.Termination.ExitCode == 0 {
					glog.Infof("Saw pod success")
					return true
				} else {
					glog.Infof("Saw pod failure: %+v", ci.State.Termination)
				}
				glog.Infof("Waiting for pod %q status to be success or failure", podName)
			} else {
				glog.Infof("Nil State.Termination for container %s in pod %s so far", contName, podName)
			}
		}
	}
	glog.Warningf("Gave up waiting for pod %q status to be success or failure", podName)
	return false
}
func CreateNewControllerFromCurrentController(c *client.Client, namespace, oldName, newName, image, deploymentKey string) (*api.ReplicationController, error) {
	// Load the old RC into the "new" RC.
	newRc, err := c.ReplicationControllers(namespace).Get(oldName)
	if err != nil {
		return nil, err
	}

	if len(newRc.Spec.Template.Spec.Containers) > 1 {
		// TODO: support multi-container image update.
		return nil, goerrors.New("Image update is not supported for multi-container pods")
	}
	if len(newRc.Spec.Template.Spec.Containers) == 0 {
		return nil, fmt.Errorf("pod has no containers! (%v)", newRc)
	}
	newRc.Spec.Template.Spec.Containers[0].Image = image

	newHash, err := api.HashObject(newRc, c.Codec)
	if err != nil {
		return nil, err
	}

	if len(newName) == 0 {
		newName = fmt.Sprintf("%s-%s", newRc.Name, newHash)
	}
	newRc.Name = newName

	newRc.Spec.Selector[deploymentKey] = newHash
	newRc.Spec.Template.Labels[deploymentKey] = newHash
	// Clear resource version after hashing so that identical updates get different hashes.
	newRc.ResourceVersion = ""
	return newRc, nil
}
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access the '/stats/' REST endpoint on the kubelet, which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
// NodeSSHHosts returns SSH-able host names for all nodes. It returns an error
// if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c *client.Client) ([]string, error) {
	var hosts []string
	nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return hosts, fmt.Errorf("error getting nodes: %v", err)
	}
	for _, n := range nodelist.Items {
		for _, addr := range n.Status.Addresses {
			// Use the first external IP address we find on the node, and
			// use at most one per node.
			// TODO(mbforbes): Use the "preferred" address for the node, once
			// such a thing is defined (#2462).
			if addr.Type == api.NodeExternalIP {
				hosts = append(hosts, addr.Address+":22")
				break
			}
		}
	}

	// Error if any node didn't have an external IP.
	if len(hosts) != len(nodelist.Items) {
		return hosts, fmt.Errorf(
			"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
			len(hosts), len(nodelist.Items), nodelist)
	}
	return hosts, nil
}
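
// Hedged usage sketch (assumption): gathering SSH targets while tolerating a
// partial result, since NodeSSHHosts still returns the hosts it did find when
// some nodes lack an external IP. The logging-and-continue policy is an
// illustrative choice, not part of the original code.
func sshTargets(c *client.Client) []string {
	hosts, err := NodeSSHHosts(c)
	if err != nil {
		Logf("Proceeding with %d SSH-able nodes despite error: %v", len(hosts), err)
	}
	return hosts
}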
func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
	var err error

	for n := 0; n < totalNS; n++ {
		_, err = createTestingNS(fmt.Sprintf("nslifetest-%v", n), c)
		Expect(err).NotTo(HaveOccurred())
	}

	// Wait 10 seconds, then send delete requests for all the namespaces.
	time.Sleep(10 * time.Second)
	nsList, err := c.Namespaces().List(labels.Everything(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	for _, item := range nsList.Items {
		if strings.Contains(item.Name, "nslifetest") {
			if err := c.Namespaces().Delete(item.Name); err != nil {
				Failf("Failed deleting namespace %v: %v", item.Name, err)
			}
		}
		Logf("namespace %v: api call to delete is complete", item.Name)
	}

	// Now poll until all namespaces have been eradicated.
	expectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
		func() (bool, error) {
			if rem, err := countRemaining(c, "nslifetest"); err != nil || rem > maxAllowedAfterDel {
				Logf("Remaining namespaces : %v", rem)
				return false, err
			}
			return true, nil
		}))
}
// testHostIP tests that a pod gets a host IP.
func testHostIP(c *client.Client, pod *api.Pod) {
	ns := "e2e-test-" + string(util.NewUUID())
	podClient := c.Pods(ns)

	By("creating pod")
	defer podClient.Delete(pod.Name)
	_, err := podClient.Create(pod)
	if err != nil {
		Fail(fmt.Sprintf("Failed to create pod: %v", err))
	}

	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err = waitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())

	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			Failf("Gave up waiting for hostIP of pod %s after %v seconds", p.Name, time.Since(t).Seconds())
		}
		Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
func runReplicationControllerTest(c *client.Client) {
	data, err := ioutil.ReadFile("api/examples/controller.json")
	if err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	var controller api.ReplicationController
	if err := api.Scheme.DecodeInto(data, &controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}

	glog.Infof("Creating replication controllers")
	if _, err := c.ReplicationControllers(api.NamespaceDefault).Create(&controller); err != nil {
		glog.Fatalf("Unexpected error: %v", err)
	}
	glog.Infof("Done creating replication controllers")

	// Give the controllers some time to actually create the pods.
	if err := wait.Poll(time.Second, time.Second*30, client.ControllerHasDesiredReplicas(c, &controller)); err != nil {
		glog.Fatalf("FAILED: pods never created %v", err)
	}

	// Wait for minions to indicate they have info about the desired pods.
	pods, err := c.Pods(api.NamespaceDefault).List(labels.Set(controller.Spec.Selector).AsSelector())
	if err != nil {
		glog.Fatalf("FAILED: unable to get pods to list: %v", err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podsOnMinions(c, *pods)); err != nil {
		glog.Fatalf("FAILED: pods never started running %v", err)
	}

	glog.Infof("Pods created")
}
// startServeHostnameService creates a replication controller that serves its hostname and a service on top of it.
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) {
	podNames := make([]string, replicas)

	_, err := c.Services(ns).Create(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name: name,
		},
		Spec: api.ServiceSpec{
			Ports: []api.ServicePort{{
				Port:       port,
				TargetPort: util.NewIntOrStringFromInt(9376),
				Protocol:   "TCP",
			}},
			Selector: map[string]string{
				"name": name,
			},
		},
	})
	if err != nil {
		return podNames, "", err
	}

	var createdPods []*api.Pod
	maxContainerFailures := 0
	config := RCConfig{
		Client:               c,
		Image:                "gcr.io/google_containers/serve_hostname:1.1",
		Name:                 name,
		Namespace:            ns,
		PollInterval:         3 * time.Second,
		Timeout:              30 * time.Second,
		Replicas:             replicas,
		CreatedPods:          &createdPods,
		MaxContainerFailures: &maxContainerFailures,
	}
	err = RunRC(config)
	if err != nil {
		return podNames, "", err
	}

	if len(createdPods) != replicas {
		return podNames, "", fmt.Errorf("incorrect number of running pods: %v", len(createdPods))
	}

	for i := range createdPods {
		podNames[i] = createdPods[i].ObjectMeta.Name
	}
	sort.StringSlice(podNames).Sort()

	service, err := c.Services(ns).Get(name)
	if err != nil {
		return podNames, "", err
	}
	if service.Spec.ClusterIP == "" {
		return podNames, "", fmt.Errorf("service IP is blank for %v", name)
	}
	serviceIP := service.Spec.ClusterIP
	return podNames, serviceIP, nil
}
func newProjectAuthorizationCache(openshiftClient *osclient.Client, kubeClient *kclient.Client, policyClient policyclient.ReadOnlyPolicyClient) *projectauth.AuthorizationCache {
	return projectauth.NewAuthorizationCache(
		projectauth.NewReviewer(openshiftClient),
		kubeClient.Namespaces(),
		policyClient,
	)
}
// nodeProxyRequest performs a GET on a node proxy endpoint given the nodename and rest client.
func nodeProxyRequest(c *client.Client, node, endpoint string) client.Result {
	return c.Get().
		Prefix("proxy").
		Resource("nodes").
		Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
		Suffix(endpoint).
		Do()
}
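
// Hedged usage sketch (assumption): reading the raw body of a kubelet endpoint
// through the apiserver proxy. It assumes client.Result exposes a
// Raw() ([]byte, error) method, as used elsewhere in this code.
func kubeletHealthz(c *client.Client, node string) ([]byte, error) {
	return nodeProxyRequest(c, node, "healthz").Raw()
}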
func runSelfLinkTest(c *client.Client) {
	var svc api.Service
	err := c.Post().Resource("services").Body(
		&api.Service{
			ObjectMeta: api.ObjectMeta{
				Name: "selflinktest",
				Labels: map[string]string{
					"name": "selflinktest",
				},
			},
			Spec: api.ServiceSpec{
				Port: 12345, // This is here because validation requires it.
				Selector: map[string]string{
					"foo": "bar",
				},
			},
		},
	).Do().Into(&svc)
	if err != nil {
		glog.Fatalf("Failed creating selflinktest service: %v", err)
	}
	err = c.Get().AbsPath(svc.SelfLink).Do().Into(&svc)
	if err != nil {
		glog.Fatalf("Failed listing service with supplied self link '%v': %v", svc.SelfLink, err)
	}

	var svcList api.ServiceList
	err = c.Get().Resource("services").Do().Into(&svcList)
	if err != nil {
		glog.Fatalf("Failed listing services: %v", err)
	}

	err = c.Get().AbsPath(svcList.SelfLink).Do().Into(&svcList)
	if err != nil {
		glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err)
	}

	found := false
	for i := range svcList.Items {
		item := &svcList.Items[i]
		if item.Name != "selflinktest" {
			continue
		}
		found = true
		err = c.Get().AbsPath(item.SelfLink).Do().Into(&svc)
		if err != nil {
			glog.Fatalf("Failed listing service with supplied self link '%v': %v", item.SelfLink, err)
		}
		break
	}
	if !found {
		glog.Fatalf("never found selflinktest service")
	}
	glog.Infof("Self link test passed")

	// TODO: Should test PUT at some point, too.
}
// NewEventsSource initializes a new events source and starts a
// goroutine to watch/fetch event updates.
func NewEventsSource(client *kubeclient.Client) EventsSource {
	// Buffered channel to send/receive events from.
	eventsChan := make(chan eventsUpdate, 1024)
	errorChan := make(chan error)
	glog.V(4).Infof("Starting event source")
	go watchLoop(client.Events(kubeapi.NamespaceAll), eventsChan, errorChan)
	glog.V(4).Infof("Finished starting event source")
	return &eventsSourceImpl{client, eventsChan, errorChan}
}
func stopServeHostnameService(c *client.Client, ns, name string) error {
	if err := DeleteRC(c, ns, name); err != nil {
		return err
	}
	if err := c.Services(ns).Delete(name); err != nil {
		return err
	}
	return nil
}
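
// Hedged usage sketch (assumption): the typical pairing of the two helpers
// above in a test: start the serve-hostname service, ensure it is torn down,
// and log the pod names and service IP that a real test would verify against.
// The port and replica count are illustrative.
func checkServeHostnameService(c *client.Client, ns, name string) error {
	podNames, serviceIP, err := startServeHostnameService(c, ns, name, 80, 3)
	if err != nil {
		return err
	}
	defer stopServeHostnameService(c, ns, name)
	Logf("Service %s has IP %s and pods %v", name, serviceIP, podNames)
	// Verification against serviceIP/podNames would go here.
	return nil
}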
func endpointsSet(c *client.Client, serviceNamespace, serviceID string, endpointCount int) wait.ConditionFunc {
	return func() (bool, error) {
		endpoints, err := c.Endpoints(serviceNamespace).Get(serviceID)
		if err != nil {
			return false, nil
		}
		return len(endpoints.Endpoints) == endpointCount, nil
	}
}
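
// Hedged usage sketch (assumption): endpointsSet returns a wait.ConditionFunc,
// so it is meant to be polled. This waits up to a minute for a service to reach
// the expected endpoint count; the interval and timeout are illustrative.
func waitForEndpoints(c *client.Client, ns, svcName string, count int) error {
	return wait.Poll(2*time.Second, time.Minute, endpointsSet(c, ns, svcName, count))
}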
// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
func NewListWatchFromClient(client *client.Client, resource string, namespace string, fieldSelector labels.Selector) *ListWatch {
	listFunc := func() (runtime.Object, error) {
		return client.Get().Namespace(namespace).Resource(resource).SelectorParam("fields", fieldSelector).Do().Get()
	}
	watchFunc := func(resourceVersion string) (watch.Interface, error) {
		return client.Get().Prefix("watch").Namespace(namespace).Resource(resource).SelectorParam("fields", fieldSelector).Param("resourceVersion", resourceVersion).Watch()
	}
	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
}
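
// Hedged usage sketch (assumption): constructing a ListWatch for pods across
// all namespaces with no field filtering. The api.NamespaceAll and
// labels.Everything() arguments are illustrative choices, not requirements.
func podListWatch(c *client.Client) *ListWatch {
	return NewListWatchFromClient(c, "pods", api.NamespaceAll, labels.Everything())
}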
func resizeRC(c *client.Client, ns, name string, replicas int) error {
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return err
	}
	rc.Spec.Replicas = replicas
	_, err = c.ReplicationControllers(rc.Namespace).Update(rc)
	return err
}
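
// Hedged usage sketch (assumption): resize an RC and then wait for the
// controller to converge, reusing the client.ControllerHasDesiredReplicas
// condition that the integration tests above poll on. The poll interval and
// timeout are illustrative.
func resizeRCAndWait(c *client.Client, ns, name string, replicas int) error {
	if err := resizeRC(c, ns, name, replicas); err != nil {
		return err
	}
	rc, err := c.ReplicationControllers(ns).Get(name)
	if err != nil {
		return err
	}
	return wait.Poll(time.Second, 30*time.Second, client.ControllerHasDesiredReplicas(c, rc))
}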
func newPodOnNode(c *client.Client, namespace, podName, nodeName string) error {
	pod, err := c.Pods(namespace).Create(podOnNode(podName, nodeName, serveHostnameImage))
	if err == nil {
		Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
	} else {
		Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
	}
	return err
}