Example #1
func TestNewClient(t *testing.T) {
	o := NewObjects(api.Scheme, api.Scheme)
	if err := AddObjectsFromPath("../../../../examples/guestbook/frontend-service.yaml", o, api.Scheme); err != nil {
		t.Fatal(err)
	}
	client := &Fake{}
	client.AddReactor("*", "*", ObjectReaction(o, testapi.Default.RESTMapper()))
	list, err := client.Services("test").List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("unexpected list %#v", list)
	}

	// When list is invoked a second time, the same results are returned.
	list, err = client.Services("test").List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("unexpected list %#v", list)
	}
	t.Logf("list: %#v", list)
}
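
The ("*", "*") arguments above register the reaction for every verb and resource. A narrower registration uses the same AddReactor signature; as a sketch (the verb/resource pair here is illustrative, not taken from the test):

	// Hypothetical narrower variant: react only to "list" calls on services.
	client.AddReactor("list", "services", ObjectReaction(o, testapi.Default.RESTMapper()))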
Example #2
// NewSourceAPI creates a config source that watches for changes to services and endpoints.
func NewSourceAPI(c *client.Client, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) {
	servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
	endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything())

	newServicesSourceApiFromLW(servicesLW, period, servicesChan)
	newEndpointsSourceApiFromLW(endpointsLW, period, endpointsChan)
}
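
A caller supplies the update channels and drains them; a minimal wiring sketch, assuming a configured *client.Client (kubeClient) and the ServiceUpdate/EndpointsUpdate types from the surrounding package:

	serviceUpdates := make(chan ServiceUpdate)
	endpointUpdates := make(chan EndpointsUpdate)
	NewSourceAPI(kubeClient, 30*time.Second, serviceUpdates, endpointUpdates)
	go func() {
		for update := range serviceUpdates {
			_ = update // react to service changes here
		}
	}()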
Example #3
func checkExistingRCRecovers(f Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.Client.Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		if err != nil {
			Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		Logf("apiserver has recovered")
		return true, nil
	}))

	By("waiting for replication controller to recover")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Example #4
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *unversioned.Client, namespace string) *loadBalancerController {
	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}
	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
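
The *tcpServices flag parsed above is a comma-separated list of name:port pairs. A standalone sketch of the same parsing rule, with an invented input value:

	// Hypothetical input: --tcp-services=mysql:3306,redis:6379
	tcpServices := map[string]int{}
	for _, service := range strings.Split("mysql:3306,redis:6379", ",") {
		parts := strings.Split(service, ":")
		if len(parts) != 2 {
			continue // misconfigured entry, skipped as in the controller
		}
		if port, err := strconv.Atoi(parts[1]); err == nil {
			tcpServices[parts[0]] = port
		}
	}
	// tcpServices == map[mysql:3306 redis:6379]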
Example #5
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolumeClaim{},
		// TODO: Can we have much longer period here?
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// no DeleteFunc needed.  a claim requires no clean-up.
			// syncVolume handles the missing claim
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
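
The constructor wires both informers but does not start them. A sketch of starting them from within the same package, assuming the framework controllers of this era run until the stop channel closes:

	stopCh := make(chan struct{})
	go binder.volumeController.Run(stopCh)
	go binder.claimController.Run(stopCh)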
Example #6
// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{client.SecretType: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), tokenSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), tokenSelector, rv)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.secretAdded,
			UpdateFunc: e.secretUpdated,
			DeleteFunc: e.secretDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.serviceAccountsSynced = e.serviceAccountController.HasSynced
	e.secretsSynced = e.secretController.HasSynced

	return e
}
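
The HasSynced functions captured above gate work on the caches; a sketch (same package) that blocks until both informers report an initial sync, reusing the wait.Poll helper seen in Example #3 and glog as in Example #4:

	err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		return e.serviceAccountsSynced() && e.secretsSynced(), nil
	})
	if err != nil {
		glog.Errorf("caches never synced: %v", err)
	}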
Example #7
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
Example #8
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl client.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client: cl,
		names:  options.Names,
	}

	accountSelector := fields.Everything()
	if len(options.Names) == 1 {
		// If we're maintaining a single account, we can scope the accounts we watch to just that name
		accountSelector = fields.SelectorFromSet(map[string]string{client.ObjectNameField: options.Names.List()[0]})
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), accountSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), accountSelector, rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.namespaces, e.namespaceController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Namespaces().Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Namespace{},
		options.NamespaceResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.namespaceAdded,
			UpdateFunc: e.namespaceUpdated,
		},
		cache.Indexers{"name": nameIndexFunc},
	)

	return e
}
Example #9
func (t *Tester) testListFound(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	t.setObjectMeta(foo2, "foo2")

	existing := assignFn([]runtime.Object{foo1, foo2})

	listObj, err := t.storage.(rest.Lister).List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(existing) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(existing, items) {
		t.Errorf("expected: %#v, got: %#v", existing, items)
	}
}
Example #10
// afterEach deletes the namespace, after reading its events.
func (f *Framework) afterEach() {
	// Print events if the test failed.
	if CurrentGinkgoTestDescription().Failed {
		By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))
		events, err := f.Client.Events(f.Namespace.Name).List(labels.Everything(), fields.Everything())
		Expect(err).NotTo(HaveOccurred())

		for _, e := range events.Items {
			Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
		}
		// Note that we don't wait for any cleanup to propagate, which means
		// that if you delete a bunch of pods right before ending your test,
		// you may or may not see the killing/deletion/cleanup events.

		dumpAllPodInfo(f.Client)
	}

	// Check whether all nodes are ready after the test.
	if err := allNodesReady(f.Client, time.Minute); err != nil {
		Failf("All nodes should be ready after test, %v", err)
	}

	By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name))

	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}
	if err := deleteNS(f.Client, f.Namespace.Name, timeout); err != nil {
		Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err)
	}
	// Paranoia-- prevent reuse!
	f.Namespace = nil
	f.Client = nil
}
Example #11
func (s *storage) ListThirdPartyResourceData(ctx api.Context, label labels.Selector) (*extensions.ThirdPartyResourceDataList, error) {
	obj, err := s.List(ctx, label, fields.Everything())
	if err != nil {
		return nil, err
	}
	return obj.(*extensions.ThirdPartyResourceDataList), nil
}
Example #12
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
	ctx := t.TestContext()
	testLabels := map[string]string{"key": "value"}

	foo1 := copyOrDie(obj)
	t.setObjectMeta(foo1, "foo1")
	foo2 := copyOrDie(obj)
	foo2Meta := t.getObjectMetaOrFail(foo2)
	foo2Meta.Name = "foo2"
	foo2Meta.Namespace = api.NamespaceValue(ctx)
	foo2Meta.Labels = testLabels

	existing := assignFn([]runtime.Object{foo1, foo2})
	filtered := []runtime.Object{existing[1]}

	selector := labels.SelectorFromSet(labels.Set(testLabels))
	listObj, err := t.storage.(rest.Lister).List(ctx, selector, fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != len(filtered) {
		t.Errorf("unexpected number of items: %v", len(items))
	}
	if !api.Semantic.DeepEqual(filtered, items) {
		t.Errorf("expected: %#v, got: %#v", filtered, items)
	}
}
Example #13
func TestListServicesLabels(t *testing.T) {
	ns := api.NamespaceDefault
	labelSelectorQueryParamName := unversioned.LabelSelectorQueryParam(testapi.Default.Version())
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("services", ns, ""),
			Query:  buildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})},
		Response: Response{StatusCode: 200,
			Body: &api.ServiceList{
				Items: []api.Service{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "name",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: api.ServiceSpec{
							Selector: map[string]string{
								"one": "two",
							},
						},
					},
				},
			},
		},
	}
	c.Setup(t)
	c.QueryValidator[labelSelectorQueryParamName] = validateLabels
	selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector()
	receivedServiceList, err := c.Services(ns).List(selector, fields.Everything())
	c.Validate(t, receivedServiceList, err)
}
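
This test narrows only the label selector; the field selector passed as fields.Everything() could be narrowed the same way, using fields.SelectorFromSet as in Example #6 (the metadata.name key is an assumption here, not taken from this test):

	fieldSelector := fields.SelectorFromSet(map[string]string{"metadata.name": "name"})
	receivedServiceList, err := c.Services(ns).List(selector, fieldSelector)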
Example #14
func TestListServices(t *testing.T) {
	ns := api.NamespaceDefault
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("services", ns, ""),
			Query:  buildQueryValues(nil)},
		Response: Response{StatusCode: 200,
			Body: &api.ServiceList{
				Items: []api.Service{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "name",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: api.ServiceSpec{
							Selector: map[string]string{
								"one": "two",
							},
						},
					},
				},
			},
		},
	}
	receivedServiceList, err := c.Setup(t).Services(ns).List(labels.Everything(), fields.Everything())
	t.Logf("received services: %v %#v", err, receivedServiceList)
	c.Validate(t, receivedServiceList, err)
}
Example #15
func TestListPods(t *testing.T) {
	ns := api.NamespaceDefault
	c := &testClient{
		Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil)},
		Response: Response{StatusCode: 200,
			Body: &api.PodList{
				Items: []api.Pod{
					{
						Status: api.PodStatus{
							Phase: api.PodRunning,
						},
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
					},
				},
			},
		},
	}
	receivedPodList, err := c.Setup(t).Pods(ns).List(labels.Everything(), fields.Everything())
	c.Validate(t, receivedPodList, err)
}
Example #16
func (ks *KubernetesScheduler) recoverTasks() error {
	podList, err := ks.client.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	if err != nil {
		log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
		return err
	}
	recoverSlave := func(t *podtask.T) {
		slaveId := t.Spec.SlaveID
		ks.slaveHostNames.Register(slaveId, t.Offer.Host())
	}
	for _, pod := range podList.Items {
		if _, isMirrorPod := pod.Annotations[kubelet.ConfigMirrorAnnotationKey]; isMirrorPod {
			// mirrored pods are never reconciled because the scheduler isn't responsible for
			// scheduling them; they're started by the executor/kubelet upon instantiation and
			// reflected in the apiserver afterward. the scheduler has no knowledge of them.
			continue
		}
		if t, ok, err := podtask.RecoverFrom(pod); err != nil {
			log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
			err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
			//TODO(jdef) check for temporary or not-found errors
			if err != nil {
				log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
			}
		} else if ok {
			ks.taskRegistry.Register(t, nil)
			recoverSlave(t)
			log.Infof("recovered task %v from pod %v/%v", t.ID, pod.Namespace, pod.Name)
		}
	}
	return nil
}
Example #17
func deletePods(kubeClient client.Interface, ns string, before unversioned.Time) (int64, error) {
	items, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	expired := unversioned.Now().After(before.Time)
	var deleteOptions *api.DeleteOptions
	if expired {
		deleteOptions = api.NewDeleteOptions(0)
	}
	estimate := int64(0)
	for i := range items.Items {
		if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
			grace := *items.Items[i].Spec.TerminationGracePeriodSeconds
			if grace > estimate {
				estimate = grace
			}
		}
		err := kubeClient.Pods(ns).Delete(items.Items[i].Name, deleteOptions)
		if err != nil && !errors.IsNotFound(err) {
			return 0, err
		}
	}
	if expired {
		estimate = 0
	}
	return estimate, nil
}
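
The returned estimate is the longest termination grace period, in seconds, among the pods that were deleted. A hypothetical caller (namespace name invented) might wait it out before re-checking:

	estimate, err := deletePods(kubeClient, "doomed-namespace", unversioned.Now())
	if err == nil && estimate > 0 {
		time.Sleep(time.Duration(estimate) * time.Second)
	}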
Example #18
// pollForReadyPods polls oldRc and newRc each interval and returns the old
// and new ready counts for their pods. If a pod is observed as being ready,
// it's considered ready even if it later becomes notReady.
func (r *RollingUpdater) pollForReadyPods(interval, timeout time.Duration, oldRc, newRc *api.ReplicationController) (int, int, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := 0
	newReady := 0
	err := wait.Poll(interval, timeout, func() (done bool, err error) {
		anyReady := false
		for _, controller := range controllers {
			selector := labels.Set(controller.Spec.Selector).AsSelector()
			pods, err := r.c.Pods(controller.Namespace).List(selector, fields.Everything())
			if err != nil {
				return false, err
			}
			for _, pod := range pods.Items {
				if api.IsPodReady(&pod) {
					switch controller.Name {
					case oldRc.Name:
						oldReady++
					case newRc.Name:
						newReady++
					}
					anyReady = true
				}
			}
		}
		if anyReady {
			return true, nil
		}
		return false, nil
	})
	return oldReady, newReady, err
}
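
An illustrative call (receiver and controller values assumed), polling once per second for up to a minute:

	oldReady, newReady, err := r.pollForReadyPods(time.Second, time.Minute, oldRc, newRc)
	if err == nil {
		fmt.Printf("ready pods: old=%d new=%d\n", oldReady, newReady)
	}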
Example #19
func TestNamespaceList(t *testing.T) {
	namespaceList := &api.NamespaceList{
		Items: []api.Namespace{
			{
				ObjectMeta: api.ObjectMeta{Name: "foo"},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath("namespaces", "", ""),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: namespaceList},
	}
	response, err := c.Setup(t).Namespaces().List(labels.Everything(), fields.Everything())

	if err != nil {
		t.Errorf("%#v should be nil.", err)
	}

	if len(response.Items) != 1 {
		t.Errorf("%#v response.Items should have len 1.", response.Items)
	}

	responseNamespace := response.Items[0]
	if e, r := responseNamespace.Name, "foo"; e != r {
		t.Errorf("%#v != %#v.", e, r)
	}
}
Example #20
func main() {
	var ingClient client.IngressInterface
	if kubeClient, err := client.NewInCluster(); err != nil {
		log.Fatalf("Failed to create client: %v.", err)
	} else {
		ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
	}
	tmpl, _ := template.New("nginx").Parse(nginxConf)
	rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
	known := &extensions.IngressList{}

	// Controller loop
	shellOut("nginx")
	for {
		rateLimiter.Accept()
		ingresses, err := ingClient.List(labels.Everything(), fields.Everything())
		if err != nil {
			log.Printf("Error retrieving ingresses: %v", err)
			continue
		}
		if reflect.DeepEqual(ingresses.Items, known.Items) {
			continue
		}
		known = ingresses
		if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
			log.Fatalf("Failed to open %v: %v", nginxConf, err)
		} else if err := tmpl.Execute(w, ingresses); err != nil {
			log.Fatalf("Failed to write template %v", err)
		}
		shellOut("nginx -s reload")
	}
}
Example #21
func TestListNodesLabels(t *testing.T) {
	labelSelectorQueryParamName := unversioned.LabelSelectorQueryParam(testapi.Default.Version())
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Default.ResourcePath(getNodesResourceName(), "", ""),
			Query:  buildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})},
		Response: Response{
			StatusCode: 200,
			Body: &api.NodeList{
				Items: []api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
					},
				},
			},
		},
	}
	c.Setup(t)
	c.QueryValidator[labelSelectorQueryParamName] = validateLabels
	selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector()
	receivedNodeList, err := c.Nodes().List(selector, fields.Everything())
	c.Validate(t, receivedNodeList, err)
}
Example #22
func TestListJobs(t *testing.T) {
	ns := api.NamespaceAll
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.Extensions.ResourcePath(getJobResourceName(), ns, ""),
		},
		Response: Response{StatusCode: 200,
			Body: &extensions.JobList{
				Items: []extensions.Job{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "foo",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: extensions.JobSpec{
							Template: api.PodTemplateSpec{},
						},
					},
				},
			},
		},
	}
	receivedJobList, err := c.Setup(t).Extensions().Jobs(ns).List(labels.Everything(), fields.Everything())
	c.Validate(t, receivedJobList, err)
}
Example #23
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
	timeout := 2 * time.Minute
	// List the pods, making sure we observe all the replicas.
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
		pods, err := c.Pods(ns).List(label, fields.Everything())
		if err != nil {
			return nil, err
		}

		created := []api.Pod{}
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp != nil {
				continue
			}
			created = append(created, pod)
		}
		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)

		if len(created) == replicas {
			pods.Items = created
			return pods, nil
		}
	}
	return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
Example #24
func (t *Tester) testListNotFound(assignFn AssignFunc, setRVFn SetRVFunc) {
	ctx := t.TestContext()

	setRVFn(uint64(123))
	_ = assignFn([]runtime.Object{})

	listObj, err := t.storage.(rest.Lister).List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	items, err := listToItems(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(items) != 0 {
		t.Errorf("unexpected items: %v", items)
	}

	meta, err := api.ListMetaFor(listObj)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if meta.ResourceVersion != "123" {
		t.Errorf("unexpected resource version: %d", meta.ResourceVersion)
	}
}
Example #25
func (h *HeapsterMetricsClient) GetResourceConsumptionAndRequest(resourceName api.ResourceName, namespace string, selector map[string]string) (consumption *ResourceConsumption, request *resource.Quantity, timestamp time.Time, err error) {
	podList, err := h.client.Pods(namespace).
		List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())

	if err != nil {
		return nil, nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	sum := resource.MustParse("0")
	missing := false
	for _, pod := range podList.Items {
		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[resourceName]
			if containerRequest.Amount != nil {
				sum.Add(containerRequest)
			} else {
				missing = true
			}
		}
	}
	if missing || sum.Cmp(resource.MustParse("0")) == 0 {
		return nil, nil, time.Time{}, fmt.Errorf("some pods do not have request for %s", resourceName)
	}
	glog.Infof("Sum of %s requested: %v", resourceName, sum)
	avg := resource.MustParse(fmt.Sprintf("%dm", sum.MilliValue()/int64(len(podList.Items))))
	request = &avg
	consumption, timestamp, err = h.getForPods(resourceName, namespace, podNames)
	if err != nil {
		return nil, nil, time.Time{}, err
	}
	return consumption, request, timestamp, nil
}
Example #26
func TestServiceRegistryList(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, registry := NewTestREST(t, nil)
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar": "baz"},
		},
	})
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo2", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar2": "baz2"},
		},
	})
	registry.List.ResourceVersion = "1"
	s, _ := storage.List(ctx, labels.Everything(), fields.Everything())
	sl := s.(*api.ServiceList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 services, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "foo2", sl.Items[1].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != "1" {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
Example #27
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		Logf("failed to retrieve kubelet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
Example #28
func testReboot(c *client.Client, rebootCmd string) {
	// Get all nodes, and kick off the test on each.
	nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
	if err != nil {
		Failf("Error getting nodes: %v", err)
	}
	result := make(chan bool, len(nodelist.Items))
	for _, n := range nodelist.Items {
		go rebootNode(c, testContext.Provider, n.ObjectMeta.Name, rebootCmd, result)
	}

	// Wait for all to finish and check the final result.
	failed := false
	// TODO(a-robinson): Change to `for range` syntax and remove logging once
	// we support only Go >= 1.4.
	for _, n := range nodelist.Items {
		if !<-result {
			Failf("Node %s failed reboot test.", n.ObjectMeta.Name)
			failed = true
		}
	}
	if failed {
		Failf("Test failed; at least one node failed to reboot in the time given.")
	}
}
Example #29
func (s *storage) ListSecrets(ctx api.Context, label labels.Selector) (*api.SecretList, error) {
	obj, err := s.List(ctx, label, fields.Everything())
	if err != nil {
		return nil, err
	}
	return obj.(*api.SecretList), nil
}
Example #30
// Returns the old RCs targeted by the given Deployment.
func GetOldRCs(deployment extensions.Deployment, c client.Interface) ([]*api.ReplicationController, error) {
	namespace := deployment.ObjectMeta.Namespace
	// 1. Find all pods whose labels match deployment.Spec.Selector
	podList, err := c.Pods(namespace).List(labels.SelectorFromSet(deployment.Spec.Selector), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing pods: %v", err)
	}
	// 2. Find the corresponding RCs for pods in podList.
	// TODO: Right now we list all RCs and then filter. We should add an API for this.
	oldRCs := map[string]api.ReplicationController{}
	rcList, err := c.ReplicationControllers(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return nil, fmt.Errorf("error listing replication controllers: %v", err)
	}
	for _, pod := range podList.Items {
		podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
		for _, rc := range rcList.Items {
			rcLabelsSelector := labels.SelectorFromSet(rc.Spec.Selector)
			if rcLabelsSelector.Matches(podLabelsSelector) {
				// Filter out RC that has the same pod template spec as the deployment - that is the new RC.
				if api.Semantic.DeepEqual(rc.Spec.Template, GetNewRCTemplate(deployment)) {
					continue
				}
				oldRCs[rc.ObjectMeta.Name] = rc
			}
		}
	}
	requiredRCs := []*api.ReplicationController{}
	for name := range oldRCs {
		// Take the address of a per-iteration copy; &value on the range
		// variable would leave every slice entry pointing at the same RC.
		value := oldRCs[name]
		requiredRCs = append(requiredRCs, &value)
	}
	return requiredRCs, nil
}