func TestNewClient(t *testing.T) {
	o := NewObjects(api.Scheme, api.Scheme)
	if err := AddObjectsFromPath("../../../../examples/guestbook/frontend-service.yaml", o, api.Scheme); err != nil {
		t.Fatal(err)
	}
	client := &Fake{}
	client.AddReactor("*", "*", ObjectReaction(o, testapi.Default.RESTMapper()))

	list, err := client.Services("test").List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("unexpected list %#v", list)
	}

	// When list is invoked a second time, the same results are returned.
	list, err = client.Services("test").List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("unexpected list %#v", list)
	}
	t.Logf("list: %#v", list)
}
func New(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, threshold int) *GCController {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	gcc := &GCController{
		kubeClient: kubeClient,
		threshold:  threshold,
		deletePod: func(namespace, name string) error {
			return kubeClient.Pods(namespace).Delete(name, api.NewDeleteOptions(0))
		},
	}

	terminatedSelector := compileTerminatedPodSelector()

	gcc.podStore.Store, gcc.podStoreSyncer = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return gcc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), terminatedSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return gcc.kubeClient.Pods(api.NamespaceAll).Watch(labels.Everything(), terminatedSelector, rv)
			},
		},
		&api.Pod{},
		resyncPeriod(),
		framework.ResourceEventHandlerFuncs{},
	)
	return gcc
}
// TestGetNodeAddresses verifies that proper results are returned
// when requesting node addresses.
func TestGetNodeAddresses(t *testing.T) {
	master, _, assert := setUp(t)

	// Fail case (no addresses associated with nodes)
	nodes, _ := master.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
	addrs, err := master.getNodeAddresses()
	assert.Error(err, "getNodeAddresses should have caused an error as there are no addresses.")
	assert.Equal([]string(nil), addrs)

	// Pass case with External type IP
	nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
	for index := range nodes.Items {
		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeExternalIP, Address: "127.0.0.1"}}
	}
	addrs, err = master.getNodeAddresses()
	assert.NoError(err, "getNodeAddresses should not have returned an error.")
	assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)

	// Pass case with LegacyHost type IP
	nodes, _ = master.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
	for index := range nodes.Items {
		nodes.Items[index].Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.2"}}
	}
	addrs, err = master.getNodeAddresses()
	assert.NoError(err, "getNodeAddresses fallback should not have returned an error.")
	assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
}
// NewResourceQuota creates a new resource quota admission control handler
func NewResourceQuota(client client.Interface) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return client.ResourceQuotas(api.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.ResourceQuotas(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
	reflector.Run()
	return createResourceQuota(client, indexer)
}
// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{client.SecretType: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), tokenSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), tokenSelector, rv)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.secretAdded,
			UpdateFunc: e.secretUpdated,
			DeleteFunc: e.secretDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.serviceAccountsSynced = e.serviceAccountController.HasSynced
	e.secretsSynced = e.secretController.HasSynced

	return e
}
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolumeClaim{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// no DeleteFunc needed. a claim requires no clean-up.
			// syncVolume handles the missing claim
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl client.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client: cl,
		names:  options.Names,
	}

	accountSelector := fields.Everything()
	if len(options.Names) == 1 {
		// If we're maintaining a single account, we can scope the accounts we watch to just that name
		accountSelector = fields.SelectorFromSet(map[string]string{client.ObjectNameField: options.Names.List()[0]})
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), accountSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), accountSelector, rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.namespaces, e.namespaceController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Namespaces().Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Namespace{},
		options.NamespaceResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.namespaceAdded,
			UpdateFunc: e.namespaceUpdated,
		},
		cache.Indexers{"name": nameIndexFunc},
	)

	return e
}
// deletePods deletes every pod in the given namespace and returns the longest
// termination grace period seen, as an estimate of how long to wait for the pods
// to disappear. If the `before` deadline has already passed, pods are deleted
// immediately and the estimate is zero.
func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
func (gcc *GCController) gc() {
	terminatedPods, _ := gcc.podStore.List(labels.Everything())
	terminatedPodCount := len(terminatedPods)
	sort.Sort(byCreationTimestamp(terminatedPods))

	deleteCount := terminatedPodCount - gcc.threshold

	if deleteCount > terminatedPodCount {
		deleteCount = terminatedPodCount
	}
	if deleteCount > 0 {
		glog.Infof("garbage collecting %v pods", deleteCount)
	}

	var wait sync.WaitGroup
	for i := 0; i < deleteCount; i++ {
		wait.Add(1)
		go func(namespace string, name string) {
			defer wait.Done()
			if err := gcc.deletePod(namespace, name); err != nil {
				// ignore not founds
				defer util.HandleError(err)
			}
		}(terminatedPods[i].Namespace, terminatedPods[i].Name)
	}
	wait.Wait()
}
func TestListIngress(t *testing.T) {
	ns := api.NamespaceAll
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Extensions.ResourcePath(getIngressResourceName(), ns, ""),
		},
		Response: Response{StatusCode: 200,
			Body: &extensions.IngressList{
				Items: []extensions.Ingress{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "foo",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: extensions.IngressSpec{
							Rules: []extensions.IngressRule{},
						},
					},
				},
			},
		},
	}
	receivedIngressList, err := c.Setup(t).Extensions().Ingress(ns).List(labels.Everything(), fields.Everything())
	c.Validate(t, receivedIngressList, err)
}
func TestListControllers(t *testing.T) {
	ns := api.NamespaceAll
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getRCResourceName(), ns, ""),
		},
		Response: Response{StatusCode: 200,
			Body: &api.ReplicationControllerList{
				Items: []api.ReplicationController{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "foo",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: api.ReplicationControllerSpec{
							Replicas: 2,
							Template: &api.PodTemplateSpec{},
						},
					},
				},
			},
		},
	}
	receivedControllerList, err := c.Setup(t).ReplicationControllers(ns).List(labels.Everything())
	c.Validate(t, receivedControllerList, err)
}
func (t *Tester) testListFound(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	t.setObjectMeta(foo2, "foo2")

	existing := assignFn([]runtime.Object{foo1, foo2})

	listObj, err := t.storage.(rest.Lister).List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(existing) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(existing, items) {
		t.Errorf("expected: %#v, got: %#v", existing, items)
	}
}
func (ks *KubernetesScheduler) recoverTasks() error {
	podList, err := ks.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	if err != nil {
		log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
		return err
	}
	recoverSlave := func(t *podtask.T) {
		slaveId := t.Spec.SlaveID
		ks.slaveHostNames.Register(slaveId, t.Offer.Host())
	}
	for _, pod := range podList.Items {
		if _, isMirrorPod := pod.Annotations[kubelet.ConfigMirrorAnnotationKey]; isMirrorPod {
			// mirrored pods are never reconciled because the scheduler isn't responsible for
			// scheduling them; they're started by the executor/kubelet upon instantiation and
			// reflected in the apiserver afterward. the scheduler has no knowledge of them.
			continue
		}
		if t, ok, err := podtask.RecoverFrom(pod); err != nil {
			log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
			err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
			//TODO(jdef) check for temporary or not-found errors
			if err != nil {
				log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
			}
		} else if ok {
			ks.taskRegistry.Register(t, nil)
			recoverSlave(t)
			log.Infof("recovered task %v from pod %v/%v", t.ID, pod.Namespace, pod.Name)
		}
	}
	return nil
}
// afterEach deletes the namespace, after reading its events.
func (f *Framework) afterEach() {
	// Print events if the test failed.
	if CurrentGinkgoTestDescription().Failed {
		By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))
		events, err := f.Client.Events(f.Namespace.Name).List(labels.Everything(), fields.Everything())
		Expect(err).NotTo(HaveOccurred())

		for _, e := range events.Items {
			Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
		}
		// Note that we don't wait for any cleanup to propagate, which means
		// that if you delete a bunch of pods right before ending your test,
		// you may or may not see the killing/deletion/cleanup events.

		dumpAllPodInfo(f.Client)
	}

	// Check whether all nodes are ready after the test.
	if err := allNodesReady(f.Client, time.Minute); err != nil {
		Failf("All nodes should be ready after test, %v", err)
	}

	By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name))

	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}
	if err := deleteNS(f.Client, f.Namespace.Name, timeout); err != nil {
		Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err)
	}

	// Paranoia-- prevent reuse!
	f.Namespace = nil
	f.Client = nil
}
func testReboot(c *client.Client, rebootCmd string) {
	// Get all nodes, and kick off the test on each.
	nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
	if err != nil {
		Failf("Error getting nodes: %v", err)
	}
	result := make(chan bool, len(nodelist.Items))
	for _, n := range nodelist.Items {
		go rebootNode(c, testContext.Provider, n.ObjectMeta.Name, rebootCmd, result)
	}

	// Wait for all to finish and check the final result.
	failed := false
	// TODO(a-robinson): Change to `for range` syntax and remove logging once
	// we support only Go >= 1.4.
	for _, n := range nodelist.Items {
		if !<-result {
			Failf("Node %s failed reboot test.", n.ObjectMeta.Name)
			failed = true
		}
	}
	if failed {
		Failf("Test failed; at least one node failed to reboot in the time given.")
	}
}
func runMasterServiceTest(client *client.Client) {
	time.Sleep(12 * time.Second)
	svcList, err := client.Services(api.NamespaceDefault).List(labels.Everything())
	if err != nil {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := sets.String{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
			foundRW = true
		}
	}
	if foundRW {
		ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
		if err != nil {
			glog.Fatalf("unexpected error listing endpoints for kubernetes service: %v", err)
		}
		if countEndpoints(ep) == 0 {
			glog.Fatalf("no endpoints for kubernetes service: %v", ep)
		}
	} else {
		glog.Errorf("no RW service found: %v", found)
		glog.Fatal("Kubernetes service test failed")
	}
	glog.Infof("Master service test passed.")
}
func (t *Tester) testListNotFound(assignFn AssignFunc, setRVFn SetRVFunc) {
	ctx := t.TestContext()
	setRVFn(uint64(123))

	_ = assignFn([]runtime.Object{})

	listObj, err := t.storage.(rest.Lister).List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != 0 {
		t.Errorf("unexpected items: %v", items)
	}

	meta, err := api.ListMetaFor(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if meta.ResourceVersion != "123" {
		// ResourceVersion is a string, so use %q rather than %d.
		t.Errorf("unexpected resource version: %q", meta.ResourceVersion)
	}
}
func TestListServices(t *testing.T) {
	ns := api.NamespaceDefault
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("services", ns, ""),
			Query:  buildQueryValues(nil)},
		Response: Response{StatusCode: 200,
			Body: &api.ServiceList{
				Items: []api.Service{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "name",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: api.ServiceSpec{
							Selector: map[string]string{
								"one": "two",
							},
						},
					},
				},
			},
		},
	}
	receivedServiceList, err := c.Setup(t).Services(ns).List(labels.Everything())
	t.Logf("received services: %v %#v", err, receivedServiceList)
	c.Validate(t, receivedServiceList, err)
}
// hasPods returns true if the provided node still has pods scheduled to it, or an error if
// the server could not be contacted.
func (nc *NodeController) hasPods(nodeName string) (bool, error) {
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return false, err
	}
	return len(pods.Items) > 0, nil
}
func TestListPods(t *testing.T) {
	ns := api.NamespaceDefault
	c := &testClient{
		Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil)},
		Response: Response{StatusCode: 200,
			Body: &api.PodList{
				Items: []api.Pod{
					{
						Status: api.PodStatus{
							Phase: api.PodRunning,
						},
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
					},
				},
			},
		},
	}
	receivedPodList, err := c.Setup(t).Pods(ns).List(labels.Everything(), fields.Everything())
	c.Validate(t, receivedPodList, err)
}
func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
	}
	tmpl, _ := template.New("nginx").Parse(nginxConf)
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(labels.Everything(), fields.Everything())
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) {
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", nginxConf, err)
		} else if err := tmpl.Execute(w, ingresses); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}
		shellOut("nginx -s reload")
	}
}
func TestListDaemonSets(t *testing.T) {
	ns := api.NamespaceAll
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Extensions.ResourcePath(getDSResourceName(), ns, ""),
		},
		Response: Response{StatusCode: 200,
			Body: &extensions.DaemonSetList{
				Items: []extensions.DaemonSet{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "foo",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: extensions.DaemonSetSpec{
							Template: &api.PodTemplateSpec{},
						},
					},
				},
			},
		},
	}
	receivedDSs, err := c.Setup(t).Extensions().DaemonSets(ns).List(labels.Everything())
	c.Validate(t, receivedDSs, err)
}
func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
	Logf("Beginning cluster validation")
	// Verify RC.
	rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("error listing RCs: %v", err)
	}
	if len(rcs.Items) != 1 {
		return fmt.Errorf("wanted 1 RC with name %s, got %d", rcNameWant, len(rcs.Items))
	}
	if got := rcs.Items[0].Name; got != rcNameWant {
		return fmt.Errorf("wanted RC name %q, got %q", rcNameWant, got)
	}

	// Verify pods.
	if err := verifyPods(f.Client, f.Namespace.Name, rcNameWant, false, podsWant); err != nil {
		return fmt.Errorf("failed to find %d %q pods: %v", podsWant, rcNameWant, err)
	}

	// Verify service.
	svc, err := f.Client.Services(f.Namespace.Name).Get(svcNameWant)
	if err != nil {
		return fmt.Errorf("error getting service %s: %v", svcNameWant, err)
	}
	if svcNameWant != svc.Name {
		return fmt.Errorf("wanted service name %q, got %q", svcNameWant, svc.Name)
	}
	// TODO(mikedanese): Make testLoadBalancerReachable return an error.
	testLoadBalancerReachable(ingress, 80)

	Logf("Cluster validation succeeded")
	return nil
}
// deletePods deletes, via the apiserver, all pods running on the given node, and
// returns true if any pods were deleted.
func (nc *NodeController) deletePods(nodeName string) (bool, error) {
	remaining := false
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return remaining, err
	}

	if len(pods.Items) > 0 {
		nc.recordNodeEvent(nodeName, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
	}

	for _, pod := range pods.Items {
		// Defensive check, also needed for tests.
		if pod.Spec.NodeName != nodeName {
			continue
		}
		// if the pod has already been deleted, ignore it
		if pod.DeletionGracePeriodSeconds != nil {
			continue
		}

		glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
		nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
		if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
			return false, err
		}
		remaining = true
	}
	return remaining, nil
}
func TestNamespaceList(t *testing.T) {
	namespaceList := &api.NamespaceList{
		Items: []api.Namespace{
			{
				ObjectMeta: api.ObjectMeta{Name: "foo"},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("namespaces", "", ""),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: namespaceList},
	}
	response, err := c.Setup(t).Namespaces().List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("%#v should be nil.", err)
	}
	if len(response.Items) != 1 {
		t.Errorf("%#v response.Items should have len 1.", response.Items)
	}
	responseNamespace := response.Items[0]
	if e, r := responseNamespace.Name, "foo"; e != r {
		t.Errorf("%#v != %#v.", e, r)
	}
}
func TestServiceRegistryList(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, registry := NewTestREST(t, nil)
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar": "baz"},
		},
	})
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo2", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar2": "baz2"},
		},
	})
	registry.List.ResourceVersion = "1"
	s, _ := storage.List(ctx, labels.Everything(), fields.Everything())
	sl := s.(*api.ServiceList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 services, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "foo2", sl.Items[1].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != "1" {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
// StartPods checks for numPods pods in TestNS. If they exist, it is a no-op; otherwise it starts
// a temp rc, scales it to numPods, then deletes the rc, leaving the pods behind.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)

	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
// GetOldRCs returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, GetNewRCTemplate(deployment)) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for _, value := range oldRCs {
		// Copy the loop variable before taking its address; otherwise every element
		// of requiredRCs would point at the same, repeatedly overwritten variable.
		value := value
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}