// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
Example #2
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
func watchNodes(client *client.Client) {
	nodeList, err := client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		log.Fatal(err)
	}
	nodes := nodeList.Items
	writeNodeTargetsFile(nodes)
	watcher, err := client.Nodes().Watch(labels.Everything(), fields.Everything(), nodeList.ResourceVersion)
	if err != nil {
		log.Fatal(err)
	}

	for event := range watcher.ResultChan() {
		switch event.Type {
		case watch.Added:
			switch obj := event.Object.(type) {
			case *api.Node:
				nodes = append(nodes, *obj)
			}
			writeNodeTargetsFile(nodes)
		case watch.Deleted:
			switch obj := event.Object.(type) {
			case *api.Node:
				index := findNodeIndexInSlice(nodes, obj)
				nodes = append(nodes[:index], nodes[index+1:]...)
			}
			writeNodeTargetsFile(nodes)
		}
	}
}
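// findNodeIndexInSlice is referenced above but not defined in this snippet.
// A plausible implementation, matching nodes by name, might look like the
// sketch below; note that watchNodes does not guard against a -1 miss, which
// would panic on the slice expression.
func findNodeIndexInSlice(nodes []api.Node, node *api.Node) int {
	for i := range nodes {
		if nodes[i].Name == node.Name {
			return i
		}
	}
	return -1
}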
Example #4
func TestEtcdWatchEndpointsAcrossNamespaces(t *testing.T) {
	ctx := api.NewContext()
	fakeClient := tools.NewFakeEtcdClient(t)
	registry := NewTestEtcdRegistry(fakeClient)
	watching, err := registry.WatchEndpoints(
		ctx,
		labels.Everything(),
		labels.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}
	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example #5
func newWatcher(kr *kregistry) (registry.Watcher, error) {
	svi := kr.client.Services(api.NamespaceAll)

	services, err := svi.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	watch, err := svi.Watch(labels.Everything(), fields.Everything(), services.ResourceVersion)
	if err != nil {
		return nil, err
	}

	w := &watcher{
		registry: kr,
		watcher:  watch,
	}

	go func() {
		for event := range watch.ResultChan() {
			w.update(event)
		}
	}()

	return w, nil
}
// resourceVersion is a pointer to the resource version to use/update.
func (rm *ReplicationManager) watchControllers(resourceVersion *uint64) {
	watching, err := rm.kubeClient.WatchReplicationControllers(
		labels.Everything(),
		labels.Everything(),
		*resourceVersion,
	)
	if err != nil {
		glog.Errorf("Unexpected failure to watch: %v", err)
		time.Sleep(5 * time.Second)
		return
	}

	for {
		select {
		case <-rm.syncTime:
			rm.synchronize()
		case event, open := <-watching.ResultChan():
			if !open {
				// watchChannel has been closed, or something else went
				// wrong with our etcd watch call. Let the util.Forever()
				// that called us call us again.
				return
			}
			glog.Infof("Got watch: %#v", event)
			if rc, ok := event.Object.(*api.ReplicationController); !ok {
				glog.Errorf("unexpected object: %#v", event.Object)
			} else {
				// If we get disconnected, start where we left off.
				*resourceVersion = rc.ResourceVersion + 1
				rm.syncHandler(*rc)
			}
		}
	}
}
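// A sketch of the driver hinted at by the util.Forever() comment inside
// watchControllers: the caller owns resourceVersion, so each restart resumes
// the watch where the last one left off. The Run name and the period wiring
// here are illustrative assumptions, not necessarily the actual API.
func (rm *ReplicationManager) Run(period time.Duration) {
	rm.syncTime = time.Tick(period)
	resourceVersion := uint64(0)
	go util.Forever(func() { rm.watchControllers(&resourceVersion) }, period)
}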
func NewFirstContainerReady(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *FirstContainerReady {
	return &FirstContainerReady{
		timeout:  timeout,
		interval: interval,
		podsForDeployment: func(deployment *kapi.ReplicationController) (*kapi.PodList, error) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
		},
		getPodStore: func(namespace, name string) (cache.Store, chan struct{}) {
			sel, _ := fields.ParseSelector("metadata.name=" + name)
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(namespace).List(labels.Everything(), sel)
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(namespace).Watch(labels.Everything(), sel, resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
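// A hedged sketch of consuming the store/stop pair that getPodStore returns:
// poll the reflector-backed store until the pod appears (the field selector
// guarantees at most one pod), then close stop to shut the reflector down.
// The method name and readiness condition are illustrative assumptions.
func (c *FirstContainerReady) waitForPod(namespace, name string) (*kapi.Pod, error) {
	store, stop := c.getPodStore(namespace, name)
	defer close(stop)

	deadline := time.Now().Add(c.timeout)
	for time.Now().Before(deadline) {
		// The store holds at most the single named pod once the reflector
		// has synced it from the list/watch.
		for _, obj := range store.List() {
			return obj.(*kapi.Pod), nil
		}
		time.Sleep(c.interval)
	}
	return nil, fmt.Errorf("timed out waiting for pod %s/%s", namespace, name)
}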
// NewNamespaceManager creates a new NamespaceManager
func NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
		},
	)

	return &NamespaceManager{
		controller: controller,
	}
}
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client: cl,
	}

	_, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
		},
	)

	e.dockerURL = options.DefaultDockerURL

	return e
}
func TestRESTWatch(t *testing.T) {
	eventA := &api.Event{
		InvolvedObject: api.ObjectReference{
			Kind:            "Pod",
			Name:            "foo",
			UID:             "long uid string",
			APIVersion:      testapi.Version(),
			ResourceVersion: "0",
			FieldPath:       "",
		},
		Reason: "ForTesting",
	}
	reg, rest := NewTestREST()
	wi, err := rest.Watch(api.NewContext(), labels.Everything(), labels.Everything(), "0")
	if err != nil {
		t.Fatalf("Unexpected error %v", err)
	}
	go func() {
		reg.Broadcaster.Action(watch.Added, eventA)
	}()
	got := <-wi.ResultChan()
	if e, a := eventA, got.Object; !reflect.DeepEqual(e, a) {
		t.Errorf("diff: %s", util.ObjectDiff(e, a))
	}
}
Example #11
func TestEndpointsRegistryList(t *testing.T) {
	registry := registrytest.NewServiceRegistry()
	storage := NewREST(registry)
	registry.EndpointsList = api.EndpointsList{
		JSONBase: api.JSONBase{ResourceVersion: 1},
		Items: []api.Endpoints{
			{JSONBase: api.JSONBase{ID: "foo"}},
			{JSONBase: api.JSONBase{ID: "bar"}},
		},
	}
	s, _ := storage.List(labels.Everything(), labels.Everything())
	sl := s.(*api.EndpointsList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 endpoints, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].ID; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "bar", sl.Items[1].ID; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != 1 {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
Example #12
func TestServiceRegistryList(t *testing.T) {
	registry := registrytest.NewServiceRegistry()
	fakeCloud := &cloud.FakeCloud{}
	machines := []string{"foo", "bar", "baz"}
	storage := NewREST(registry, fakeCloud, minion.NewRegistry(machines))
	registry.CreateService(&api.Service{
		JSONBase: api.JSONBase{ID: "foo"},
		Selector: map[string]string{"bar": "baz"},
	})
	registry.CreateService(&api.Service{
		JSONBase: api.JSONBase{ID: "foo2"},
		Selector: map[string]string{"bar2": "baz2"},
	})
	registry.List.ResourceVersion = 1
	s, _ := storage.List(labels.Everything(), labels.Everything())
	sl := s.(*api.ServiceList)
	if len(fakeCloud.Calls) != 0 {
		t.Errorf("Unexpected call(s): %#v", fakeCloud.Calls)
	}
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 services, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].ID; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "foo2", sl.Items[1].ID; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != 1 {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
Example #13
func (oi *OsdnRegistryInterface) WatchMinions(receiver chan *osdnapi.MinionEvent, stop chan bool) error {
	minionEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
	listWatch := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return oi.kClient.Nodes().List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return oi.kClient.Nodes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	cache.NewReflector(listWatch, &kapi.Node{}, minionEventQueue, 4*time.Minute).Run()

	for {
		eventType, obj, err := minionEventQueue.Pop()
		if err != nil {
			return err
		}
		switch eventType {
		case watch.Added:
			// We ignore the Modified event here because status updates cause
			// unnecessary noise. The only modification we would care about is the
			// minion changing its IP address, since all nodes would then need to
			// update their VTEP entries for the respective subnet.
			node := obj.(*kapi.Node)
			receiver <- &osdnapi.MinionEvent{Type: osdnapi.Added, Minion: node.ObjectMeta.Name}
		case watch.Deleted:
			// TODO: There is a chance that a Delete event will not get triggered.
			// Need to use a periodic sync loop that lists and compares.
			node := obj.(*kapi.Node)
			receiver <- &osdnapi.MinionEvent{Type: osdnapi.Deleted, Minion: node.ObjectMeta.Name}
		}
	}
}
Example #14
// run loops forever looking for changes to endpoints.
func (s *endpointsReflector) run(resourceVersion *string) {
	if len(*resourceVersion) == 0 {
		endpoints, err := s.watcher.List(labels.Everything())
		if err != nil {
			glog.Errorf("Unable to load endpoints: %v", err)
			time.Sleep(wait.Jitter(s.waitDuration, 0.0))
			return
		}
		*resourceVersion = endpoints.ResourceVersion
		s.endpoints <- EndpointsUpdate{Op: SET, Endpoints: endpoints.Items}
	}

	watcher, err := s.watcher.Watch(labels.Everything(), fields.Everything(), *resourceVersion)
	if err != nil {
		glog.Errorf("Unable to watch for endpoints changes: %v", err)
		if !client.IsTimeout(err) {
			// Reset so that we do a fresh get request
			*resourceVersion = ""
		}

		time.Sleep(wait.Jitter(s.waitDuration, 0.0))
		return
	}
	defer watcher.Stop()

	ch := watcher.ResultChan()
	s.watchHandler(resourceVersion, ch, s.endpoints)
}
func TestListPodsCacheError(t *testing.T) {
	podRegistry := registrytest.NewPodRegistry(nil)
	podRegistry.Pods = &api.PodList{
		Items: []api.Pod{
			{
				ObjectMeta: api.ObjectMeta{
					Name: "foo",
				},
			},
		},
	}
	storage := REST{
		registry: podRegistry,
		podCache: &fakeCache{errorToReturn: client.ErrPodInfoNotAvailable},
	}
	ctx := api.NewContext()
	pods, err := storage.List(ctx, labels.Everything(), labels.Everything())
	if err != nil {
		t.Fatalf("Expected no error, got %#v", err)
	}
	pl := pods.(*api.PodList)
	if len(pl.Items) != 1 {
		t.Fatalf("Unexpected 0-len pod list: %+v", pl)
	}
	if e, a := api.PodUnknown, pl.Items[0].Status.Phase; e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}
Example #16
// run loops forever looking for changes to services.
func (s *servicesReflector) run(resourceVersion *string) {
	if len(*resourceVersion) == 0 {
		services, err := s.watcher.List(labels.Everything())
		if err != nil {
			glog.Errorf("Unable to load services: %v", err)
			// TODO: reconcile with pkg/client/cache which doesn't use reflector.
			time.Sleep(wait.Jitter(s.waitDuration, 0.0))
			return
		}
		*resourceVersion = services.ResourceVersion
		// TODO: replace with code to update the
		s.services <- ServiceUpdate{Op: SET, Services: services.Items}
	}

	watcher, err := s.watcher.Watch(labels.Everything(), fields.Everything(), *resourceVersion)
	if err != nil {
		glog.Errorf("Unable to watch for services changes: %v", err)
		if !client.IsTimeout(err) {
			// Reset so that we do a fresh get request
			*resourceVersion = ""
		}
		time.Sleep(wait.Jitter(s.waitDuration, 0.0))
		return
	}
	defer watcher.Stop()

	ch := watcher.ResultChan()
	s.watchHandler(resourceVersion, ch, s.services)
}
Example #17
func TestListPodList(t *testing.T) {
	podRegistry := registrytest.NewPodRegistry(nil)
	podRegistry.Pods = &api.PodList{
		Items: []api.Pod{
			{
				JSONBase: api.JSONBase{
					ID: "foo",
				},
			},
			{
				JSONBase: api.JSONBase{
					ID: "bar",
				},
			},
		},
	}
	storage := REST{
		registry: podRegistry,
	}
	podsObj, err := storage.List(labels.Everything(), labels.Everything())
	pods := podsObj.(*api.PodList)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if len(pods.Items) != 2 {
		t.Errorf("Unexpected pod list: %#v", pods)
	}
	if pods.Items[0].ID != "foo" {
		t.Errorf("Unexpected pod: %#v", pods.Items[0])
	}
	if pods.Items[1].ID != "bar" {
		t.Errorf("Unexpected pod: %#v", pods.Items[1])
	}
}
Example #18
func (a *DefaultRuleResolver) GetRoleBindings(ctx kapi.Context) ([]authorizationinterfaces.RoleBinding, error) {
	namespace := kapi.NamespaceValue(ctx)

	if len(namespace) == 0 {
		policyBindingList, err := a.clusterBindingLister.ListClusterPolicyBindings(ctx, labels.Everything(), fields.Everything())
		if err != nil {
			return nil, err
		}

		ret := make([]authorizationinterfaces.RoleBinding, 0, len(policyBindingList.Items))
		for _, policyBinding := range policyBindingList.Items {
			for _, value := range policyBinding.RoleBindings {
				ret = append(ret, authorizationinterfaces.NewClusterRoleBindingAdapter(value))
			}
		}
		return ret, nil
	}

	policyBindingList, err := a.bindingLister.ListPolicyBindings(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		return nil, err
	}

	ret := make([]authorizationinterfaces.RoleBinding, 0, len(policyBindingList.Items))
	for _, policyBinding := range policyBindingList.Items {
		for _, value := range policyBinding.RoleBindings {
			ret = append(ret, authorizationinterfaces.NewLocalRoleBindingAdapter(value))
		}
	}
	return ret, nil
}
Example #19
func TestEndpointsRegistryList(t *testing.T) {
	registry := registrytest.NewServiceRegistry()
	storage := NewREST(registry)
	registry.EndpointsList = api.EndpointsList{
		ListMeta: api.ListMeta{ResourceVersion: "1"},
		Items: []api.Endpoints{
			{ObjectMeta: api.ObjectMeta{Name: "foo"}},
			{ObjectMeta: api.ObjectMeta{Name: "bar"}},
		},
	}
	ctx := api.NewContext()
	s, _ := storage.List(ctx, labels.Everything(), labels.Everything())
	sl := s.(*api.EndpointsList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 endpoints, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "bar", sl.Items[1].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != "1" {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
// Simplified version of RunRC that does not create an RC, but creates plain Pods
// instead; it requires passing the whole Pod definition, which is needed to test
// various Scheduler predicates.
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
	pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	podsRunningBefore := len(pods.Items)

	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.Spec.Containers[0].Name = podName
		_, err = c.Pods(ns).Create(&pod)
		expectNoError(err)
	}

	// Wait for pods to start running.
	timeout := 2 * time.Minute
	startTime := time.Now()
	currentlyRunningPods := 0
	for podsRunningBefore+replicas != currentlyRunningPods {
		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		runningPods := 0
		for _, pod := range allPods.Items {
			if pod.Status.Phase == api.PodRunning {
				runningPods += 1
			}
		}
		currentlyRunningPods = runningPods
		if startTime.Add(timeout).Before(time.Now()) {
			break
		}
		time.Sleep(5 * time.Second)
	}
	Expect(currentlyRunningPods).To(Equal(podsRunningBefore + replicas))
}
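// A hedged usage sketch for startPods: the pod passed in must carry a non-nil
// Labels map and at least one container, because startPods assigns
// pod.ObjectMeta.Labels["name"] and pod.Spec.Containers[0].Name per replica.
// The image, namespace, and prefix below are illustrative assumptions.
func exampleStartPods(c *client.Client) {
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels: map[string]string{}, // "name" is filled in per replica
		},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{Image: "kubernetes/pause"},
			},
		},
	}
	startPods(c, 3, "e2e-test", "scheduler-pred", pod)
}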
Example #21
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})

	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return registry.ListClusterPolicies(ctx, labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return registry.WatchClusterPolicies(ctx, labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&authorizationapi.ClusterPolicy{},
		indexer,
		2*time.Minute,
	)

	return readOnlyClusterPolicyCache{
		registry:  registry,
		indexer:   indexer,
		reflector: *reflector,

		keyFunc: cache.MetaNamespaceKeyFunc,
	}
}
func TestNamespaceWatch(t *testing.T) {
	c := &testClient{
		Request:  testRequest{Method: "GET", Path: "/watch/namespaces", Query: url.Values{"resourceVersion": []string{}}},
		Response: Response{StatusCode: 200},
	}
	_, err := c.Setup().Namespaces().Watch(labels.Everything(), labels.Everything(), "")
	c.Validate(t, nil, err)
}
// List lists all the nodes in the cluster.
func (n *nodeAdaptor) List() (*api.NodeList, error) {
	ctx := api.NewContext()
	obj, err := n.storage.(apiserver.RESTLister).List(ctx, labels.Everything(), labels.Everything())
	if err != nil {
		return nil, err
	}
	return obj.(*api.NodeList), nil
}
Example #24
func TestMinionREST(t *testing.T) {
	m := NewRegistry([]string{"foo", "bar"})
	ms := NewREST(m)

	if obj, err := ms.Get("foo"); err != nil || obj.(*api.Minion).ID != "foo" {
		t.Errorf("missing expected object")
	}
	if obj, err := ms.Get("bar"); err != nil || obj.(*api.Minion).ID != "bar" {
		t.Errorf("missing expected object")
	}
	if _, err := ms.Get("baz"); err != ErrDoesNotExist {
		t.Errorf("has unexpected object")
	}

	c, err := ms.Create(&api.Minion{JSONBase: api.JSONBase{ID: "baz"}})
	if err != nil {
		t.Errorf("insert failed")
	}
	obj := <-c
	if m, ok := obj.(*api.Minion); !ok || m.ID != "baz" {
		t.Errorf("insert return value was weird: %#v", obj)
	}
	if obj, err := ms.Get("baz"); err != nil || obj.(*api.Minion).ID != "baz" {
		t.Errorf("insert didn't actually insert")
	}

	c, err = ms.Delete("bar")
	if err != nil {
		t.Errorf("delete failed")
	}
	obj = <-c
	if s, ok := obj.(*api.Status); !ok || s.Status != api.StatusSuccess {
		t.Errorf("delete return value was weird: %#v", obj)
	}
	if _, err := ms.Get("bar"); err != ErrDoesNotExist {
		t.Errorf("delete didn't actually delete")
	}

	_, err = ms.Delete("bar")
	if err != ErrDoesNotExist {
		t.Errorf("delete returned wrong error")
	}

	list, err := ms.List(labels.Everything(), labels.Everything())
	if err != nil {
		t.Errorf("got error calling List")
	}
	expect := []api.Minion{
		{
			JSONBase: api.JSONBase{ID: "baz"},
		}, {
			JSONBase: api.JSONBase{ID: "foo"},
		},
	}
	if !reflect.DeepEqual(list.(*api.MinionList).Items, expect) {
		t.Errorf("Unexpected list value: %#v", list)
	}
}
Example #25
func TestMinionREST(t *testing.T) {
	ms := NewREST(registrytest.NewMinionRegistry([]string{"foo", "bar"}, api.NodeResources{}))
	ctx := api.NewContext()
	if obj, err := ms.Get(ctx, "foo"); err != nil || obj.(*api.Minion).Name != "foo" {
		t.Errorf("missing expected object")
	}
	if obj, err := ms.Get(ctx, "bar"); err != nil || obj.(*api.Minion).Name != "bar" {
		t.Errorf("missing expected object")
	}
	if _, err := ms.Get(ctx, "baz"); err != ErrDoesNotExist {
		t.Errorf("has unexpected object")
	}

	c, err := ms.Create(ctx, &api.Minion{ObjectMeta: api.ObjectMeta{Name: "baz"}})
	if err != nil {
		t.Errorf("insert failed")
	}
	obj := <-c
	if m, ok := obj.Object.(*api.Minion); !ok || m.Name != "baz" {
		t.Errorf("insert return value was weird: %#v", obj)
	}
	if obj, err := ms.Get(ctx, "baz"); err != nil || obj.(*api.Minion).Name != "baz" {
		t.Errorf("insert didn't actually insert")
	}

	c, err = ms.Delete(ctx, "bar")
	if err != nil {
		t.Errorf("delete failed")
	}
	obj = <-c
	if s, ok := obj.Object.(*api.Status); !ok || s.Status != api.StatusSuccess {
		t.Errorf("delete return value was weird: %#v", obj)
	}
	if _, err := ms.Get(ctx, "bar"); err != ErrDoesNotExist {
		t.Errorf("delete didn't actually delete")
	}

	_, err = ms.Delete(ctx, "bar")
	if err != ErrDoesNotExist {
		t.Errorf("delete returned wrong error")
	}

	list, err := ms.List(ctx, labels.Everything(), labels.Everything())
	if err != nil {
		t.Errorf("got error calling List")
	}
	expect := []api.Minion{
		{
			ObjectMeta: api.ObjectMeta{Name: "foo"},
		}, {
			ObjectMeta: api.ObjectMeta{Name: "baz"},
		},
	}
	nodeList := list.(*api.MinionList)
	if len(expect) != len(nodeList.Items) || !contains(nodeList, "foo") || !contains(nodeList, "baz") {
		t.Errorf("Unexpected list value: %#v", list)
	}
}
Example #26
// watchLoop loops forever looking for new events.  If an error occurs it will close the channel and return.
func watchLoop(eventClient kubeclient.EventInterface, eventsChan chan<- eventsUpdate, errorChan chan<- error) {
	defer close(eventsChan)
	defer close(errorChan)
	events, err := eventClient.List(kubelabels.Everything(), kubefields.Everything())
	if err != nil {
		glog.Errorf("Failed to load events: %v", err)
		errorChan <- err
		return
	}
	resourceVersion := events.ResourceVersion
	eventsChan <- eventsUpdate{events: events}

	watcher, err := eventClient.Watch(kubelabels.Everything(), kubefields.Everything(), resourceVersion)
	if err != nil {
		glog.Errorf("Failed to start watch for new events: %v", err)
		errorChan <- err
		return
	}
	defer watcher.Stop()

	watchChannel := watcher.ResultChan()
	for {
		watchUpdate, ok := <-watchChannel
		if !ok {
			err := errors.New("watchLoop channel closed")
			errorChan <- err
			return
		}

		if watchUpdate.Type == kubewatch.Error {
			if status, ok := watchUpdate.Object.(*kubeapi.Status); ok {
				err := fmt.Errorf("Error during watch: %#v", status)
				errorChan <- err
				return
			}
			err := fmt.Errorf("Received unexpected error: %#v", watchUpdate.Object)
			errorChan <- err
			return
		}

		if event, ok := watchUpdate.Object.(*kubeapi.Event); ok {

			switch watchUpdate.Type {
			case kubewatch.Added, kubewatch.Modified:
				eventsChan <- eventsUpdate{&kubeapi.EventList{Items: []kubeapi.Event{*event}}}
			case kubewatch.Deleted:
				// Deleted events are silently ignored
			default:
				err := fmt.Errorf("Unknown watchUpdate.Type: %#v", watchUpdate.Type)
				errorChan <- err
				return
			}
			resourceVersion = event.ResourceVersion
			continue
		}
	}
}
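// A minimal consumer sketch for watchLoop. Every return path in watchLoop
// sends on errorChan before its deferred closes run, so errorChan is buffered
// here to avoid deadlocking while this goroutine is still draining
// eventsChan. The restart-forever policy is an illustrative assumption.
func runEventWatcher(eventClient kubeclient.EventInterface) {
	for {
		eventsChan := make(chan eventsUpdate)
		errorChan := make(chan error, 1)
		go watchLoop(eventClient, eventsChan, errorChan)

		// Drain updates until watchLoop returns and closes the channel.
		for update := range eventsChan {
			_ = update.events // process the batch of events here
		}
		if err := <-errorChan; err != nil {
			glog.Errorf("event watch terminated: %v", err)
		}
	}
}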
Example #27
// Create creates a DeploymentConfigChangeController.
func (factory *DeploymentConfigChangeControllerFactory) Create() controller.RunnableController {
	deploymentConfigLW := &deployutil.ListWatcherImpl{
		ListFunc: func() (runtime.Object, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return factory.Client.DeploymentConfigs(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(deploymentConfigLW, &deployapi.DeploymentConfig{}, queue, 2*time.Minute).Run()

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(factory.KubeClient.Events(""))

	changeController := &DeploymentConfigChangeController{
		changeStrategy: &changeStrategyImpl{
			getDeploymentFunc: func(namespace, name string) (*kapi.ReplicationController, error) {
				return factory.KubeClient.ReplicationControllers(namespace).Get(name)
			},
			generateDeploymentConfigFunc: func(namespace, name string) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Generate(name)
			},
			updateDeploymentConfigFunc: func(namespace string, config *deployapi.DeploymentConfig) (*deployapi.DeploymentConfig, error) {
				return factory.Client.DeploymentConfigs(namespace).Update(config)
			},
		},
		decodeConfig: func(deployment *kapi.ReplicationController) (*deployapi.DeploymentConfig, error) {
			return deployutil.DecodeDeploymentConfig(deployment, factory.Codec)
		},
		recorder: eventBroadcaster.NewRecorder(kapi.EventSource{Component: "deployer"}),
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(fatalError); isFatal {
					return false
				}
				if retries.Count > 0 {
					return false
				}
				return true
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			config := obj.(*deployapi.DeploymentConfig)
			return changeController.Handle(config)
		},
	}
}
func TestBootstrapPolicyAuthenticatedUsersAgainstOpenshiftNamespace(t *testing.T) {
	_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	valerieClientConfig := *clusterAdminClientConfig
	valerieClientConfig.Username = ""
	valerieClientConfig.Password = ""
	valerieClientConfig.BearerToken = ""
	valerieClientConfig.CertFile = ""
	valerieClientConfig.KeyFile = ""
	valerieClientConfig.CertData = nil
	valerieClientConfig.KeyData = nil

	accessToken, err := tokencmd.RequestToken(&valerieClientConfig, nil, "valerie", "security!")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valerieClientConfig.BearerToken = accessToken
	valerieOpenshiftClient, err := client.New(&valerieClientConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	openshiftSharedResourcesNamespace := "openshift"

	if _, err := valerieOpenshiftClient.Templates(openshiftSharedResourcesNamespace).List(labels.Everything(), fields.Everything()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := valerieOpenshiftClient.Templates(kapi.NamespaceDefault).List(labels.Everything(), fields.Everything()); err == nil || !kapierror.IsForbidden(err) {
		t.Errorf("unexpected error: %v", err)
	}

	if _, err := valerieOpenshiftClient.ImageStreams(openshiftSharedResourcesNamespace).List(labels.Everything(), fields.Everything()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := valerieOpenshiftClient.ImageStreams(kapi.NamespaceDefault).List(labels.Everything(), fields.Everything()); err == nil || !kapierror.IsForbidden(err) {
		t.Errorf("unexpected error: %v", err)
	}

	if _, err := valerieOpenshiftClient.ImageStreamTags(openshiftSharedResourcesNamespace).Get("name", "tag"); !kapierror.IsNotFound(err) {
		t.Errorf("unexpected error: %v", err)
	}
	if _, err := valerieOpenshiftClient.ImageStreamTags(kapi.NamespaceDefault).Get("name", "tag"); err == nil || !kapierror.IsForbidden(err) {
		t.Errorf("unexpected error: %v", err)
	}
}
Example #29
// runServices loops forever looking for changes to services
func (s *SourceAPI) runServices(resourceVersion *uint64) {
	watcher, err := s.client.WatchServices(labels.Everything(), labels.Everything(), *resourceVersion)
	if err != nil {
		glog.Errorf("Unable to watch for services changes: %v", err)
		time.Sleep(wait.Jitter(s.waitDuration, 0.0))
		return
	}
	defer watcher.Stop()

	ch := watcher.ResultChan()
	handleServicesWatch(resourceVersion, ch, s.services)
}
Example #30
func main() {
	config := &client.Config{
		Host:     "http://localhost:8080",
		Username: "******",
		Password: "******",
	}
	client, err := client.New(config)
	if err != nil {
		fmt.Println(err)
		return
	}
	// TODO: make the namespace a flag for the script
	ns := api.NamespaceDefault
	list, err := client.Services(ns).List(labels.Everything())
	if err != nil {
		fmt.Println(err)
	}
	// TODO: allow selection of nodes by labels or fields
	nodes, err := client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		fmt.Println(err)
	}
	// TODO: this data structure is a mess. Maybe add Kubernetes client helper
	// methods for some of these data points.
	var templateServiceServers []ServiceServers
	for _, service := range list.Items {
		if service.Name == "kubernetes" {
			// TODO: better way of filtering out the kubernetes service?
			continue
		}
		s := &ServiceServers{ServiceName: service.Name}
		sp := &ServerPort{}
		for _, port := range service.Spec.Ports {
			sp.ServicePort = port.NodePort
		}
		for _, node := range nodes.Items {
			for _, address := range node.Status.Addresses {
				sp.ServerAddress = address.Address
			}
		}
		s.ServerPorts = append(s.ServerPorts, *sp)

		templateServiceServers = append(templateServiceServers, *s)
	}

	// TODO: allow your own template file
	t := template.New("Nginx config template")
	t, err = t.ParseFiles("nginx.tmpl")
	if err != nil {
		fmt.Println(err)
	}
	if err = t.ExecuteTemplate(os.Stdout, "nginx.tmpl", templateServiceServers); err != nil {
		fmt.Println(err)
	}
}