// NewNamespaceManager creates a new NamespaceManager
func NewNamespaceManager(qingClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return qingClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return qingClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				err := syncNamespace(qingClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				err := syncNamespace(qingClient, *namespace)
				if err != nil {
					glog.Error(err)
				}
			},
		},
	)

	return &NamespaceManager{
		controller: controller,
	}
}
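
A minimal wiring sketch, assuming the unexported controller field is driven from within the same package via the framework's usual Run/stop-channel pattern (the helper name here is hypothetical):

// Hypothetical wiring (same package): start the namespace informer and stop
// it on shutdown. framework.Controller.Run blocks, dispatching the Add/Update
// handlers above and resyncing every resyncPeriod, so it gets its own goroutine.
func runNamespaceManager(qingClient client.Interface) (stop func()) {
	nm := NewNamespaceManager(qingClient, 5*time.Minute)
	stopCh := make(chan struct{})
	go nm.controller.Run(stopCh)
	return func() { close(stopCh) }
}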
Example #2
// run performs one list/watch cycle for services; it returns on error and is intended to be invoked in a loop.
func (s *servicesReflector) run(resourceVersion *string) {
	if len(*resourceVersion) == 0 {
		services, err := s.watcher.List(labels.Everything())
		if err != nil {
			glog.Errorf("Unable to load services: %v", err)
			// TODO: reconcile with pkg/client/cache which doesn't use reflector.
			time.Sleep(wait.Jitter(s.waitDuration, 0.0))
			return
		}
		*resourceVersion = services.ResourceVersion
		// TODO: replace with code to update the
		s.services <- ServiceUpdate{Op: SET, Services: services.Items}
	}

	watcher, err := s.watcher.Watch(labels.Everything(), fields.Everything(), *resourceVersion)
	if err != nil {
		glog.Errorf("Unable to watch for services changes: %v", err)
		if !client.IsTimeout(err) {
			// Reset so that we do a fresh get request
			*resourceVersion = ""
		}
		time.Sleep(wait.Jitter(s.waitDuration, 0.0))
		return
	}
	defer watcher.Stop()

	ch := watcher.ResultChan()
	s.watchHandler(resourceVersion, ch, s.services)
}
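
Because run returns after any list or watch failure, a caller must re-enter it in a loop. A sketch of such a driver, assuming the codebase's util.Forever helper; the resource version persists across calls so a healthy watch resumes where it left off rather than re-listing:

// Hypothetical driver (same package): re-enter run forever. Failure backoff
// already happens inside run via wait.Jitter, so no extra period is needed
// between iterations.
func (s *servicesReflector) runForever() {
	resourceVersion := ""
	util.Forever(func() { s.run(&resourceVersion) }, 0)
}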
Example #3
// run performs one list/watch cycle for endpoints; it returns on error and is intended to be invoked in a loop.
func (s *endpointsReflector) run(resourceVersion *string) {
	if len(*resourceVersion) == 0 {
		endpoints, err := s.watcher.List(labels.Everything())
		if err != nil {
			glog.Errorf("Unable to load endpoints: %v", err)
			time.Sleep(wait.Jitter(s.waitDuration, 0.0))
			return
		}
		*resourceVersion = endpoints.ResourceVersion
		s.endpoints <- EndpointsUpdate{Op: SET, Endpoints: endpoints.Items}
	}

	watcher, err := s.watcher.Watch(labels.Everything(), fields.Everything(), *resourceVersion)
	if err != nil {
		glog.Errorf("Unable to watch for endpoints changes: %v", err)
		if !client.IsTimeout(err) {
			// Reset so that we do a fresh get request
			*resourceVersion = ""
		}
		time.Sleep(wait.Jitter(s.waitDuration, 0.0))
		return
	}
	defer watcher.Stop()

	ch := watcher.ResultChan()
	s.watchHandler(resourceVersion, ch, s.endpoints)
}
Example #4
// NewResourceQuota creates a new resource quota admission control handler
func NewResourceQuota(client client.Interface) admission.Interface {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return client.ResourceQuotas(api.NamespaceAll).List(labels.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return client.ResourceQuotas(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)
	reflector.Run()
	return createResourceQuota(client, indexer)
}
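
A sketch of how the admission handler might read quotas back out of the cache. The "namespace" index name matches what NewNamespaceKeyedIndexerAndReflector builds; the helper name and the exact Index call shape are assumptions about this era's cache.Indexer interface:

// Hypothetical helper: look up the cached ResourceQuota objects for a
// namespace via the "namespace" index, which the reflector keeps warm.
func quotasForNamespace(indexer cache.Indexer, namespace string) ([]*api.ResourceQuota, error) {
	items, err := indexer.Index("namespace", &api.ResourceQuota{
		ObjectMeta: api.ObjectMeta{Namespace: namespace},
	})
	if err != nil {
		return nil, err
	}
	quotas := make([]*api.ResourceQuota, 0, len(items))
	for _, item := range items {
		quotas = append(quotas, item.(*api.ResourceQuota))
	}
	return quotas, nil
}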
Example #5
// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{client.SecretType: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), tokenSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), tokenSelector, rv)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.secretAdded,
			UpdateFunc: e.secretUpdated,
			DeleteFunc: e.secretDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.serviceAccountsSynced = e.serviceAccountController.HasSynced
	e.secretsSynced = e.secretController.HasSynced

	return e
}
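
A startup sketch, assuming a Run-style entry point drives both informers; the HasSynced funcs captured above let callers wait until the caches are warm before trusting lookups (the function name and the busy-wait are assumptions):

// Hypothetical startup (same package): run both informers, then wait for
// their caches to sync before the controller acts on lookups.
func runTokensController(e *TokensController) {
	stopCh := make(chan struct{})
	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)
	for !e.serviceAccountsSynced() || !e.secretsSynced() {
		time.Sleep(100 * time.Millisecond)
	}
}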
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
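
The test above relies on a helper type that isn't shown; a plausible definition, consistent with how readOps and writeOps are built:

// Assumed definition: each operation is a closure over a client call,
// surfacing only the API error so the caller can classify it.
type testOperation func() error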
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(qingClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(qingClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return qingClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return qingClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return qingClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return qingClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// No DeleteFunc is needed: a claim requires no clean-up;
			// syncVolume handles the missing claim.
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl client.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client: cl,
		names:  options.Names,
	}

	accountSelector := fields.Everything()
	if len(options.Names) == 1 {
		// If we're maintaining a single account, we can scope the accounts we watch to just that name
		accountSelector = fields.SelectorFromSet(map[string]string{client.ObjectNameField: options.Names.List()[0]})
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), accountSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), accountSelector, rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.namespaces, e.namespaceController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Namespaces().Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Namespace{},
		options.NamespaceResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.namespaceAdded,
			UpdateFunc: e.namespaceUpdated,
		},
		cache.Indexers{"name": nameIndexFunc},
	)

	return e
}
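
nameIndexFunc is referenced but not defined in this snippet. A sketch of what a name index could look like, assuming this era's cache.IndexFunc contract returns a slice of index keys:

// Hypothetical index function: key each cached object by its metadata name,
// so the controller can look namespaces up by name via the "name" index.
func nameIndexFunc(obj interface{}) ([]string, error) {
	ns, ok := obj.(*api.Namespace)
	if !ok {
		return nil, fmt.Errorf("expected *api.Namespace, got %T", obj)
	}
	return []string{ns.Name}, nil
}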
Example #9
func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
	var err error

	for n := 0; n < totalNS; n++ {
		_, err = createTestingNS(fmt.Sprintf("nslifetest-%v", n), c)
		Expect(err).NotTo(HaveOccurred())
	}

	// Wait 10 seconds, then send delete requests for all the namespaces.
	time.Sleep(10 * time.Second)
	nsList, err := c.Namespaces().List(labels.Everything(), fields.Everything())
	Expect(err).NotTo(HaveOccurred())
	for _, item := range nsList.Items {
		if strings.Contains(item.Name, "nslifetest") {
			if err := c.Namespaces().Delete(item.Name); err != nil {
				Failf("failed deleting namespace %v: %v", item.Name, err)
			}
			Logf("namespace %v: delete API call is complete", item.Name)
		}
	}

	// Now poll until all namespaces have been eradicated.
	expectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
		func() (bool, error) {
			if rem, err := countRemaining(c, "nslifetest"); err != nil || rem > maxAllowedAfterDel {
				Logf("remaining namespaces: %v", rem)
				return false, err
			}
			return true, nil
		}))
}
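
countRemaining isn't shown either; a sketch of what it presumably does, using only client calls already seen above (the name and signature are assumptions):

// Hypothetical helper: count the namespaces whose names contain the given
// substring, i.e. the test namespaces that have not finished terminating.
func countRemaining(c *client.Client, substring string) (int, error) {
	nsList, err := c.Namespaces().List(labels.Everything(), fields.Everything())
	if err != nil {
		return 0, err
	}
	count := 0
	for _, item := range nsList.Items {
		if strings.Contains(item.Name, substring) {
			count++
		}
	}
	return count, nil
}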
Example #10
func TestRESTWatch(t *testing.T) {
	eventA := &api.Event{
		InvolvedObject: api.ObjectReference{
			Kind:            "Pod",
			Name:            "foo",
			UID:             "long uid string",
			APIVersion:      testapi.Version(),
			ResourceVersion: "0",
			FieldPath:       "",
		},
		Reason: "ForTesting",
	}
	reg, rest := NewTestREST()
	wi, err := rest.Watch(api.NewContext(), labels.Everything(), fields.Everything(), "0")
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	go func() {
		reg.Broadcaster.Action(watch.Added, eventA)
	}()
	got := <-wi.ResultChan()
	if e, a := eventA, got.Object; !reflect.DeepEqual(e, a) {
		t.Errorf("diff: %s", util.ObjectDiff(e, a))
	}
}
Example #11
func TestNamespaceList(t *testing.T) {
	namespaceList := &api.NamespaceList{
		Items: []api.Namespace{
			{
				ObjectMeta: api.ObjectMeta{Name: "foo"},
			},
		},
	}
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.ResourcePath("namespaces", "", ""),
			Body:   nil,
		},
		Response: Response{StatusCode: 200, Body: namespaceList},
	}
	response, err := c.Setup().Namespaces().List(labels.Everything(), fields.Everything())

	if err != nil {
		t.Errorf("%#v should be nil.", err)
	}

	if len(response.Items) != 1 {
		t.Errorf("%#v response.Items should have len 1.", response.Items)
	}

	responseNamespace := response.Items[0]
	if e, r := responseNamespace.Name, "foo"; e != r {
		t.Errorf("%#v != %#v.", e, r)
	}
}
func TestListControllers(t *testing.T) {
	ns := api.NamespaceAll
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.ResourcePath(getRCResourceName(), ns, ""),
		},
		Response: Response{StatusCode: 200,
			Body: &api.ReplicationControllerList{
				Items: []api.ReplicationController{
					{
						ObjectMeta: api.ObjectMeta{
							Name: "foo",
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
						Spec: api.ReplicationControllerSpec{
							Replicas: 2,
							Template: &api.PodTemplateSpec{},
						},
					},
				},
			},
		},
	}
	receivedControllerList, err := c.Setup().ReplicationControllers(ns).List(labels.Everything())
	c.Validate(t, receivedControllerList, err)
}
Example #13
func (d *PodDescriber) Describe(namespace, name string) (string, error) {
	rc := d.ReplicationControllers(namespace)
	pc := d.Pods(namespace)

	pod, err := pc.Get(name)
	if err != nil {
		eventsInterface := d.Events(namespace)
		events, err2 := eventsInterface.List(
			labels.Everything(),
			eventsInterface.GetFieldSelector(&name, &namespace, nil, nil))
		if err2 == nil && len(events.Items) > 0 {
			return tabbedString(func(out io.Writer) error {
				fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err)
				DescribeEvents(events, out)
				return nil
			})
		}
		return "", err
	}

	var events *api.EventList
	if ref, err := api.GetReference(pod); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", pod, err)
	} else {
		ref.Kind = ""
		events, _ = d.Events(namespace).Search(ref)
	}

	rcs, err := getReplicationControllersForLabels(rc, labels.Set(pod.Labels))
	if err != nil {
		return "", err
	}

	return describePod(pod, rcs, events)
}
Example #14
func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
	Logf("Beginning cluster validation")
	// Verify RC.
	rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(labels.Everything())
	if err != nil {
		return fmt.Errorf("error listing RCs: %v", err)
	}
	if len(rcs.Items) != 1 {
		return fmt.Errorf("wanted 1 RC with name %s, got %d", rcNameWant, len(rcs.Items))
	}
	if got := rcs.Items[0].Name; got != rcNameWant {
		return fmt.Errorf("wanted RC name %q, got %q", rcNameWant, got)
	}

	// Verify pods.
	if err := verifyPods(f.Client, f.Namespace.Name, rcNameWant, false, podsWant); err != nil {
		return fmt.Errorf("failed to find %d %q pods: %v", podsWant, rcNameWant, err)
	}

	// Verify service.
	svc, err := f.Client.Services(f.Namespace.Name).Get(svcNameWant)
	if err != nil {
		return fmt.Errorf("error getting service %s: %v", svcNameWant, err)
	}
	if svcNameWant != svc.Name {
		return fmt.Errorf("wanted service name %q, got %q", svcNameWant, svc.Name)
	}
	// TODO(mbforbes): Make testLoadBalancerReachable return an error.
	testLoadBalancerReachable(ingress, 80)

	Logf("Cluster validation succeeded")
	return nil
}
Example #15
// makePodRegistryReconciler is a reconciler-action factory: it performs explicit
// task reconciliation for non-terminal tasks identified by annotations in the
// QingYuan pod registry.
func (k *QingYuanScheduler) makePodRegistryReconciler() ReconcilerAction {
	return ReconcilerAction(func(drv bindings.SchedulerDriver, cancel <-chan struct{}) <-chan error {
		ctx := api.NewDefaultContext()
		podList, err := k.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything())
		if err != nil {
			return proc.ErrorChanf("failed to reconcile pod registry: %v", err)
		}
		taskToSlave := make(map[string]string)
		for _, pod := range podList.Items {
			if len(pod.Annotations) == 0 {
				continue
			}
			taskId, found := pod.Annotations[meta.TaskIdKey]
			if !found {
				continue
			}
			slaveId, found := pod.Annotations[meta.SlaveIdKey]
			if !found {
				continue
			}
			taskToSlave[taskId] = slaveId
		}
		return proc.ErrorChan(k.explicitlyReconcileTasks(drv, taskToSlave, cancel))
	})
}
Example #16
func TestEtcdListPersistentVolumes(t *testing.T) {
	ctx := api.NewContext()
	storage, _, fakeClient, _ := newStorage(t)
	key := storage.KeyRootFunc(ctx)
	key = etcdtest.AddPrefix(key)
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, validNewPersistentVolume("foo")),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, validNewPersistentVolume("bar")),
					},
				},
			},
		},
		E: nil,
	}

	pvObj, err := storage.List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	pvs := pvObj.(*api.PersistentVolumeList)

	if len(pvs.Items) != 2 || pvs.Items[0].Name != "foo" || pvs.Items[1].Name != "bar" {
		t.Errorf("Unexpected persistentVolume list: %#v", pvs)
	}
}
Example #17
func TestEtcdList(t *testing.T) {
	registry, _, fakeClient, _ := newStorage(t)
	ctx := api.NewDefaultContext()
	key := registry.KeyRootFunc(ctx)
	key = etcdtest.AddPrefix(key)
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.ResourceQuota{
							ObjectMeta: api.ObjectMeta{Name: "foo"},
						}),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, &api.ResourceQuota{
							ObjectMeta: api.ObjectMeta{Name: "bar"},
						}),
					},
				},
			},
		},
		E: nil,
	}
	obj, err := registry.List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	resourcequotas := obj.(*api.ResourceQuotaList)

	if len(resourcequotas.Items) != 2 || resourcequotas.Items[0].Name != "foo" || resourcequotas.Items[1].Name != "bar" {
		t.Errorf("Unexpected resourcequota list: %#v", resourcequotas)
	}
}
Example #18
func TestListEmptyResourceQuotaList(t *testing.T) {
	fakeEtcdClient, helper := newHelper(t)
	fakeEtcdClient.ChangeIndex = 1
	storage, _ := NewStorage(helper)
	ctx := api.NewContext()
	key := storage.Etcd.KeyRootFunc(ctx)
	key = etcdtest.AddPrefix(key)

	fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{},
		E: fakeEtcdClient.NewError(tools.EtcdErrorCodeNotFound),
	}

	resourcequotas, err := storage.List(api.NewContext(), labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if len(resourcequotas.(*api.ResourceQuotaList).Items) != 0 {
		t.Errorf("Unexpected non-zero resourcequota list: %#v", resourcequotas)
	}
	if resourcequotas.(*api.ResourceQuotaList).ResourceVersion != "1" {
		t.Errorf("Unexpected resource version: %#v", resourcequotas)
	}
}
Example #19
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it
// starts up a temp rc, scales it to match numPods, then deletes the rc, leaving
// behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)
	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
Example #20
func CheckCadvisorHealthOnAllNodes(c *client.Client, timeout time.Duration) {
	By("getting list of nodes")
	nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
	expectNoError(err)
	var errors []error
	retries := maxRetries
	for {
		errors = []error{}
		for _, node := range nodeList.Items {
			// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
			// Here, we access '/stats/' REST endpoint on the qinglet which polls cadvisor internally.
			statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
			By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
			_, err = c.Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
			if err != nil {
				errors = append(errors, err)
			}
		}
		if len(errors) == 0 {
			return
		}
		if retries--; retries <= 0 {
			break
		}
		Logf("failed to retrieve qinglet stats -\n %v", errors)
		time.Sleep(sleepDuration)
	}
	Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}
Example #21
func testReboot(c *client.Client, rebootCmd string) {
	// This test requires SSH, so the provider check here should match the SSH
	// support check (the limiting factor is the implementation of util.go's
	// getSigner(...)).
	provider := testContext.Provider
	if !providerIs("aws", "gce") {
		By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
		return
	}

	// Get all nodes, and kick off the test on each.
	nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
	if err != nil {
		Failf("Error getting nodes: %v", err)
	}
	result := make(chan bool, len(nodelist.Items))
	for _, n := range nodelist.Items {
		go rebootNode(c, provider, n.ObjectMeta.Name, rebootCmd, result)
	}

	// Wait for all to finish and check the final result.
	failed := false
	// TODO(mbforbes): Change to `for range` syntax and remove logging once
	// we support only Go >= 1.4.
	for _, n := range nodelist.Items {
		if !<-result {
			Failf("Node %s failed reboot test.", n.ObjectMeta.Name)
			failed = true
		}
	}
	if failed {
		Failf("Test failed; at least one node failed to reboot in the time given.")
	}
}
Example #22
func TestEtcdWatchServices(t *testing.T) {
	ctx := api.NewDefaultContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	watching, err := registry.WatchServices(ctx,
		labels.Everything(),
		fields.SelectorFromSet(fields.Set{"name": "foo"}),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}
	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example #23
func (ks *QingYuanScheduler) recoverTasks() error {
	ctx := api.NewDefaultContext()
	podList, err := ks.client.Pods(api.NamespaceValue(ctx)).List(labels.Everything(), fields.Everything())
	if err != nil {
		log.V(1).Infof("failed to recover pod registry, madness may ensue: %v", err)
		return err
	}
	recoverSlave := func(t *podtask.T) {
		slaveId := t.Spec.SlaveID
		ks.slaves.checkAndAdd(slaveId, t.Offer.Host())
	}
	for _, pod := range podList.Items {
		if t, ok, err := podtask.RecoverFrom(pod); err != nil {
			log.Errorf("failed to recover task from pod, will attempt to delete '%v/%v': %v", pod.Namespace, pod.Name, err)
			err := ks.client.Pods(pod.Namespace).Delete(pod.Name, nil)
			//TODO(jdef) check for temporary or not-found errors
			if err != nil {
				log.Errorf("failed to delete pod '%v/%v': %v", pod.Namespace, pod.Name, err)
			}
		} else if ok {
			ks.taskRegistry.Register(t, nil)
			recoverSlave(t)
			log.Infof("recovered task %v from pod %v/%v", t.ID, pod.Namespace, pod.Name)
		}
	}
	return nil
}
Example #24
func TestEtcdWatchEndpointsAcrossNamespaces(t *testing.T) {
	ctx := api.NewContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistryWithPods(fakeClient)
	watching, err := registry.endpoints.WatchEndpoints(
		ctx,
		labels.Everything(),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}
	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example #25
func (d *NamespaceDescriber) Describe(namespace, name string) (string, error) {
	ns, err := d.Namespaces().Get(name)
	if err != nil {
		return "", err
	}
	resourceQuotaList, err := d.ResourceQuotas(name).List(labels.Everything())
	if err != nil {
		return "", err
	}
	limitRangeList, err := d.LimitRanges(name).List(labels.Everything())
	if err != nil {
		return "", err
	}

	return describeNamespace(ns, resourceQuotaList, limitRangeList)
}
Example #26
func (s *ServiceController) createExternalLoadBalancer(service *api.Service) error {
	ports, err := getPortsForLB(service)
	if err != nil {
		return err
	}
	nodes, err := s.qingClient.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	name := s.loadBalancerName(service)
	if len(service.Spec.DeprecatedPublicIPs) > 0 {
		for _, publicIP := range service.Spec.DeprecatedPublicIPs {
			// TODO: Make this actually work for multiple IPs by using different
			// names for each. For now, we'll just create the first and break.
			status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
				ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
			if err != nil {
				return err
			}
			service.Status.LoadBalancer = *status
			break
		}
	} else {
		status, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
			ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
		if err != nil {
			return err
		}
		service.Status.LoadBalancer = *status
	}
	return nil
}
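
hostsFromNodeList is used but not defined here; a sketch consistent with its call sites, assuming the cloud balancer identifies hosts by node name:

// Hypothetical helper: collect host identifiers from a NodeList for the
// load balancer. Using ObjectMeta.Name assumes node name equals hostname.
func hostsFromNodeList(list *api.NodeList) []string {
	hosts := make([]string, 0, len(list.Items))
	for i := range list.Items {
		hosts = append(hosts, list.Items[i].Name)
	}
	return hosts
}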
Example #27
func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
	mc := d.Nodes()
	node, err := mc.Get(name)
	if err != nil {
		return "", err
	}

	var pods []*api.Pod
	allPods, err := d.Pods(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	for i := range allPods.Items {
		pod := &allPods.Items[i]
		if pod.Spec.NodeName != name {
			continue
		}
		pods = append(pods, pod)
	}

	var events *api.EventList
	if ref, err := api.GetReference(node); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
	} else {
		// TODO: We haven't decided the namespace for Node object yet.
		ref.UID = types.UID(ref.Name)
		events, _ = d.Events("").Search(ref)
	}

	return describeNode(node, pods, events)
}
Example #28
func TestEtcdWatchPods(t *testing.T) {
	registry, _, _, fakeClient, _ := newStorage(t)
	ctx := api.NewDefaultContext()
	watching, err := registry.Watch(ctx,
		labels.Everything(),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}
	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example #29
func TestServiceRegistryList(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, registry := NewTestREST(t, nil)
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar": "baz"},
		},
	})
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo2", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar2": "baz2"},
		},
	})
	registry.List.ResourceVersion = "1"
	s, _ := storage.List(ctx, labels.Everything(), fields.Everything())
	sl := s.(*api.ServiceList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 services, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "foo2", sl.Items[1].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != "1" {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
Example #30
func (s *storage) ListEndpoints(ctx api.Context) (*api.EndpointsList, error) {
	obj, err := s.List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}
	return obj.(*api.EndpointsList), nil
}
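
This wrapper narrows the generic registry List to a typed result; a usage sketch from within the package, with context construction as in the earlier examples (the function name is hypothetical):

// Hypothetical call site: list every endpoints object visible in the
// default-namespace context and log the typed result.
func logEndpoints(s *storage) {
	ctx := api.NewDefaultContext()
	endpointsList, err := s.ListEndpoints(ctx)
	if err != nil {
		glog.Errorf("unable to list endpoints: %v", err)
		return
	}
	glog.Infof("found %d endpoints objects", len(endpointsList.Items))
}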