Example No. 1
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client: cl,
	}

	_, e.serviceAccountController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
		},
	)

	e.dockerURL = options.DefaultDockerURL

	return e
}
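A minimal usage sketch for the constructor above. The option fields (Resync, DefaultDockerURL) are taken from the function body; the registry URL value and the run wiring on the embedded framework controller are assumptions, not part of the example.

func runDockercfgController(cl client.Interface) *DockercfgController {
	controller := NewDockercfgController(cl, DockercfgControllerOptions{
		Resync:           10 * time.Minute,            // informer re-list interval (illustrative value)
		DefaultDockerURL: "registry.example.com:5000", // hypothetical registry URL
	})
	// In real wiring the embedded serviceAccountController would be run with
	// a stop channel (assumed; not shown in the example above).
	return controller
}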
Example No. 2
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
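Once RunProjectCache has populated the store, reads can go through the cache instead of the API server. A read sketch, assuming cache.Store's GetByKey method; for cluster-scoped objects the MetaNamespaceKeyFunc key is simply the object name.

func getCachedNamespace(store cache.Store, name string) (*kapi.Namespace, bool) {
	obj, exists, err := store.GetByKey(name) // key is "<name>" for non-namespaced objects
	if err != nil || !exists {
		return nil, false
	}
	return obj.(*kapi.Namespace), true
}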
Example No. 3
func watchNodes(client *client.Client) {
	nodeList, err := client.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		log.Fatal(err)
	}
	nodes := nodeList.Items
	writeNodeTargetsFile(nodes)
	watcher, err := client.Nodes().Watch(labels.Everything(), fields.Everything(), nodeList.ResourceVersion)
	if err != nil {
		log.Fatal(err)
	}

	for event := range watcher.ResultChan() {
		switch event.Type {
		case watch.Added:
			switch obj := event.Object.(type) {
			case *api.Node:
				nodes = append(nodes, *obj)
			}
			writeNodeTargetsFile(nodes)
		case watch.Deleted:
			switch obj := event.Object.(type) {
			case *api.Node:
				index := findNodeIndexInSlice(nodes, obj)
				nodes = append(nodes[:index], nodes[index+1:]...)
			}
			writeNodeTargetsFile(nodes)
		}
	}
}
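findNodeIndexInSlice is referenced above but not shown; a plausible implementation (an assumption, matching nodes by name) would be:

func findNodeIndexInSlice(nodes []api.Node, node *api.Node) int {
	for i := range nodes {
		if nodes[i].Name == node.Name {
			return i
		}
	}
	return -1 // the caller above assumes the node is present
}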
Example No. 4
func (oi *OsdnRegistryInterface) WatchMinions(receiver chan *osdnapi.MinionEvent, stop chan bool) error {
	minionEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
	listWatch := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return oi.kClient.Nodes().List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return oi.kClient.Nodes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	cache.NewReflector(listWatch, &kapi.Node{}, minionEventQueue, 4*time.Minute).Run()

	for {
		eventType, obj, err := minionEventQueue.Pop()
		if err != nil {
			return err
		}
		switch eventType {
		case watch.Added:
			// Ignore Modified events: status updates cause unnecessary noise.
			// The only modification we would care about is a minion changing its
			// IP address, since all nodes would then need to update their VTEP
			// entries for the respective subnet.
			node := obj.(*kapi.Node)
			receiver <- &osdnapi.MinionEvent{Type: osdnapi.Added, Minion: node.ObjectMeta.Name}
		case watch.Deleted:
			// TODO: There is a chance that a Delete event will not get triggered.
			// Need to use a periodic sync loop that lists and compares.
			node := obj.(*kapi.Node)
			receiver <- &osdnapi.MinionEvent{Type: osdnapi.Deleted, Minion: node.ObjectMeta.Name}
		}
	}
}
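A consumer sketch for the receiver channel, using only the MinionEvent type and event constants that appear above; the handler bodies are placeholders.

func consumeMinionEvents(receiver chan *osdnapi.MinionEvent) {
	for ev := range receiver {
		switch ev.Type {
		case osdnapi.Added:
			// allocate/announce the subnet for ev.Minion (placeholder)
		case osdnapi.Deleted:
			// release state held for ev.Minion (placeholder)
		}
	}
}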
Example No. 5
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
	ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)

	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})

	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return registry.ListClusterPolicies(ctx, labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return registry.WatchClusterPolicies(ctx, labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&authorizationapi.ClusterPolicy{},
		indexer,
		2*time.Minute,
	)

	return readOnlyClusterPolicyCache{
		registry:  registry,
		indexer:   indexer,
		reflector: *reflector,

		keyFunc: cache.MetaNamespaceKeyFunc,
	}
}
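The returned cache only becomes useful once the embedded reflector has run and filled the indexer. A read sketch, assuming the indexer is consulted through the cache.Store List method it embeds:

func listCachedClusterPolicies(indexer cache.Indexer) []*authorizationapi.ClusterPolicy {
	var policies []*authorizationapi.ClusterPolicy
	for _, obj := range indexer.List() {
		if policy, ok := obj.(*authorizationapi.ClusterPolicy); ok {
			policies = append(policies, policy)
		}
	}
	return policies
}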
Example No. 6
// NewNamespaceManager creates a new NamespaceManager
func NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {
	_, controller := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				namespace := obj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				namespace := newObj.(*api.Namespace)
				syncNamespace(kubeClient, *namespace)
			},
		},
	)

	return &NamespaceManager{
		controller: controller,
	}
}
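A caller sketch; the five-minute resync value is illustrative, and how the embedded framework controller is eventually run (typically with a stop channel) is an assumption.

func startNamespaceManager(kubeClient client.Interface) *NamespaceManager {
	manager := NewNamespaceManager(kubeClient, 5*time.Minute) // resync every 5m (illustrative)
	// manager's embedded controller would be run with a stop channel in real wiring (assumed).
	return manager
}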
Example No. 7
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
	lw := &cache.ListWatch{
		ListFunc: func() (runtime.Object, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
		},
		WatchFunc: func(resourceVersion string) (watch.Interface, error) {
			return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
		},
	}
	q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()

	c := &ImportController{
		client:   dockerregistry.NewClient(),
		streams:  f.Client,
		mappings: f.Client,
	}

	return &controller.RetryController{
		Queue: q,
		RetryManager: controller.NewQueueRetryManager(
			q,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				util.HandleError(err)
				return retries.Count < 5
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			r := obj.(*api.ImageStream)
			return c.Next(r)
		},
	}
}
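The retry predicate above gives up after five attempts for every error. A variant that also drops conflicting writes immediately might look like this sketch; kapierror is assumed to be the same API errors package used elsewhere in these examples.

func retryWithConflictCutoff(obj interface{}, err error, retries controller.Retry) bool {
	util.HandleError(err)
	if kapierror.IsConflict(err) {
		return false // conflicting writes are picked up again on the next resync instead
	}
	return retries.Count < 5
}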
Example No. 8
// startPods is a simplified version of RunRC: it does not create an RC, but creates
// plain pods from a complete pod definition, which is needed to test various scheduler predicates.
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
	pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
	expectNoError(err)
	podsRunningBefore := len(pods.Items)

	for i := 0; i < replicas; i++ {
		podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
		pod.ObjectMeta.Name = podName
		pod.ObjectMeta.Labels["name"] = podName
		pod.Spec.Containers[0].Name = podName
		_, err = c.Pods(ns).Create(&pod)
		expectNoError(err)
	}

	// Wait for pods to start running.
	timeout := 2 * time.Minute
	startTime := time.Now()
	currentlyRunningPods := 0
	for podsRunningBefore+replicas != currentlyRunningPods {
		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		expectNoError(err)
		runningPods := 0
		for _, pod := range allPods.Items {
			if pod.Status.Phase == api.PodRunning {
				runningPods++
			}
		}
		currentlyRunningPods = runningPods
		if startTime.Add(timeout).Before(time.Now()) {
			break
		}
		time.Sleep(5 * time.Second)
	}
	Expect(currentlyRunningPods).To(Equal(podsRunningBefore + replicas))
}
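The hand-rolled wait loop in startPods could also be written with wait.Poll, which later examples in this set already use. A sketch of the equivalent condition, under the same names and assumptions as above:

func waitForPodsRunning(c *client.Client, podsRunningBefore, replicas int) {
	expectNoError(wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
		allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
		if err != nil {
			return false, err
		}
		running := 0
		for _, p := range allPods.Items {
			if p.Status.Phase == api.PodRunning {
				running++
			}
		}
		return running == podsRunningBefore+replicas, nil
	}))
}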
Example No. 9
func checkExistingRCRecovers(f Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.Client.Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		return true, nil
	}))

	By("waiting for replication controller to recover")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if api.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Example No. 10
// NewSourceAPI creates a config source that watches for changes to services and endpoints.
func NewSourceAPI(c *client.Client, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) {
	servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
	endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything())

	newServicesSourceApiFromLW(servicesLW, period, servicesChan)
	newEndpointsSourceApiFromLW(endpointsLW, period, endpointsChan)
}
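A consumer sketch for NewSourceAPI, using only the channel types in its signature; the 30-second period is illustrative.

func runServiceConfigSource(c *client.Client) {
	servicesChan := make(chan ServiceUpdate)
	endpointsChan := make(chan EndpointsUpdate)
	NewSourceAPI(c, 30*time.Second, servicesChan, endpointsChan)
	for {
		select {
		case update := <-servicesChan:
			_ = update // apply the service update (placeholder)
		case update := <-endpointsChan:
			_ = update // apply the endpoints update (placeholder)
		}
	}
}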
Example No. 11
func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, error) {
	g := osgraph.New()

	svcs, err := d.K.Services(namespace).List(labels.Everything())
	if err != nil {
		return g, err
	}

	iss, err := d.C.ImageStreams(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return g, err
	}

	bcs, err := d.C.BuildConfigs(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return g, err
	}

	dcs, err := d.C.DeploymentConfigs(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return g, err
	}

	builds := &buildapi.BuildList{}
	if len(bcs.Items) > 0 {
		if b, err := d.C.Builds(namespace).List(labels.Everything(), fields.Everything()); err == nil {
			builds = b
		}
	}

	rcs, err := d.K.ReplicationControllers(namespace).List(labels.Everything())
	if err != nil {
		rcs = &kapi.ReplicationControllerList{}
	}

	for i := range iss.Items {
		imagegraph.EnsureImageStreamNode(g, &iss.Items[i])
		imagegraph.EnsureAllImageStreamTagNodes(g, &iss.Items[i])
	}
	for i := range bcs.Items {
		build := buildgraph.EnsureBuildConfigNode(g, &bcs.Items[i])
		buildedges.AddInputOutputEdges(g, build)
		buildedges.JoinBuilds(build, builds.Items)
	}
	for i := range dcs.Items {
		deploy := deploygraph.EnsureDeploymentConfigNode(g, &dcs.Items[i])
		deployedges.AddTriggerEdges(g, deploy)
		deployedges.JoinDeployments(deploy, rcs.Items)
	}
	for i := range svcs.Items {
		service := kubegraph.EnsureServiceNode(g, &svcs.Items[i])
		kubeedges.AddExposedPodTemplateSpecEdges(g, service)
	}

	imageedges.AddAllImageStreamRefEdges(g)

	return g, nil
}
Example No. 12
// watchLoop loops forever looking for new events. If an error occurs, it closes its channels and returns.
func watchLoop(eventClient kubeclient.EventInterface, eventsChan chan<- eventsUpdate, errorChan chan<- error) {
	defer close(eventsChan)
	defer close(errorChan)
	events, err := eventClient.List(kubelabels.Everything(), kubefields.Everything())
	if err != nil {
		glog.Errorf("Failed to load events: %v", err)
		errorChan <- err
		return
	}
	resourceVersion := events.ResourceVersion
	eventsChan <- eventsUpdate{events: events}

	watcher, err := eventClient.Watch(kubelabels.Everything(), kubefields.Everything(), resourceVersion)
	if err != nil {
		glog.Errorf("Failed to start watch for new events: %v", err)
		errorChan <- err
		return
	}
	defer watcher.Stop()

	watchChannel := watcher.ResultChan()
	for {
		watchUpdate, ok := <-watchChannel
		if !ok {
			err := errors.New("watchLoop channel closed")
			errorChan <- err
			return
		}

		if watchUpdate.Type == kubewatch.Error {
			if status, ok := watchUpdate.Object.(*kubeapi.Status); ok {
				err := fmt.Errorf("Error during watch: %#v", status)
				errorChan <- err
				return
			}
			err := fmt.Errorf("Received unexpected error: %#v", watchUpdate.Object)
			errorChan <- err
			return
		}

		if event, ok := watchUpdate.Object.(*kubeapi.Event); ok {

			switch watchUpdate.Type {
			case kubewatch.Added, kubewatch.Modified:
				eventsChan <- eventsUpdate{&kubeapi.EventList{Items: []kubeapi.Event{*event}}}
			case kubewatch.Deleted:
				// Deleted events are silently ignored
			default:
				err := fmt.Errorf("Unknown watchUpdate.Type: %#v", watchUpdate.Type)
				errorChan <- err
				return
			}
			resourceVersion = event.ResourceVersion
			continue
		}
	}
}
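Because watchLoop closes both of its channels before returning, a caller can treat channel closure as the termination signal. A caller sketch; the restart policy is an assumption, and errorChan is buffered so watchLoop's final error send never blocks.

func runEventWatcher(eventClient kubeclient.EventInterface) {
	for {
		eventsChan := make(chan eventsUpdate)
		errorChan := make(chan error, 1) // buffered: watchLoop sends the error before closing
		go watchLoop(eventClient, eventsChan, errorChan)
		for update := range eventsChan {
			_ = update // process the batch of events (placeholder)
		}
		if err := <-errorChan; err != nil {
			glog.Errorf("watchLoop terminated, restarting: %v", err)
		}
	}
}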
Example No. 13
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *client.Client, namespace string) *loadBalancerController {

	lbc := loadBalancerController{
		cfg:    cfg,
		client: kubeClient,
		queue:  workqueue.New(),
		reloadRateLimiter: util.NewTokenBucketRateLimiter(
			reloadQPS, int(reloadQPS)),
		targetService:   *targetService,
		forwardServices: *forwardServices,
		httpPort:        *httpPort,
		tcpServices:     map[string]int{},
	}

	for _, service := range strings.Split(*tcpServices, ",") {
		portSplit := strings.Split(service, ":")
		if len(portSplit) != 2 {
			glog.Errorf("Ignoring misconfigured TCP service %v", service)
			continue
		}
		if port, err := strconv.Atoi(portSplit[1]); err != nil {
			glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
			continue
		} else {
			lbc.tcpServices[portSplit[0]] = port
		}
	}
	enqueue := func(obj interface{}) {
		key, err := keyFunc(obj)
		if err != nil {
			glog.Infof("Couldn't get key for object %+v: %v", obj, err)
			return
		}
		lbc.queue.Add(key)
	}
	eventHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		DeleteFunc: enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				enqueue(cur)
			}
		},
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, eventHandlers)

	lbc.epLister.Store, lbc.epController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "endpoints", namespace, fields.Everything()),
		&api.Endpoints{}, resyncPeriod, eventHandlers)

	return &lbc
}
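keyFunc is referenced in enqueue but not shown; in controllers of this shape it is typically the namespace/name key function from the cache package (an assumption):

var keyFunc = cache.MetaNamespaceKeyFunc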
Example No. 14
func TestAuthorizationRestrictedAccessForProjectAdmins(t *testing.T) {
	_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	haroldClient, err := testutil.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "hammer-project", "harold")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	markClient, err := testutil.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "mallet-project", "mark")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = haroldClient.DeploymentConfigs("hammer-project").List(labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = markClient.DeploymentConfigs("hammer-project").List(labels.Everything(), fields.Everything())
	if (err == nil) || !kapierror.IsForbidden(err) {
		t.Fatalf("unexpected error: %v", err)
	}

	// Projects are a special case: a GET of a project actually sets a namespace. Make sure
	// the namespace is properly special-cased and set for authorization rules.
	_, err = haroldClient.Projects().Get("hammer-project")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	_, err = markClient.Projects().Get("hammer-project")
	if (err == nil) || !kapierror.IsForbidden(err) {
		t.Fatalf("unexpected error: %v", err)
	}

	// Wait for the project authorization cache to catch the change; it refreshes on a one-second period.
	waitForProject(t, haroldClient, "hammer-project", 1*time.Second, 10)
	waitForProject(t, markClient, "mallet-project", 1*time.Second, 10)
}
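waitForProject is used above but not shown; a plausible shape (hypothetical: the client parameter type and failure message are assumptions) polls until the project is visible to the given client:

func waitForProject(t *testing.T, client *client.Client, projectName string, delay time.Duration, attempts int) {
	for i := 0; i < attempts; i++ {
		if _, err := client.Projects().Get(projectName); err == nil {
			return
		}
		time.Sleep(delay)
	}
	t.Fatalf("project %q never became visible", projectName)
}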
Example No. 15
// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
	e := &TokensController{
		client: cl,
		token:  options.TokenGenerator,
		rootCA: options.RootCA,
	}

	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.serviceAccountAdded,
			UpdateFunc: e.serviceAccountUpdated,
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	tokenSelector := fields.SelectorFromSet(map[string]string{client.SecretType: string(api.SecretTypeServiceAccountToken)})
	e.secrets, e.secretController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), tokenSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), tokenSelector, rv)
			},
		},
		&api.Secret{},
		options.SecretResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.secretAdded,
			UpdateFunc: e.secretUpdated,
			DeleteFunc: e.secretDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.serviceAccountsSynced = e.serviceAccountController.HasSynced
	e.secretsSynced = e.secretController.HasSynced

	return e
}
Example No. 16
// NewPersistentVolumeClaimBinder creates a new PersistentVolumeClaimBinder
func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time.Duration) *PersistentVolumeClaimBinder {
	volumeIndex := NewPersistentVolumeOrderedIndex()
	binderClient := NewBinderClient(kubeClient)
	binder := &PersistentVolumeClaimBinder{
		volumeIndex: volumeIndex,
		client:      binderClient,
	}

	_, volumeController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumes().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addVolume,
			UpdateFunc: binder.updateVolume,
			DeleteFunc: binder.deleteVolume,
		},
	)
	_, claimController := framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return kubeClient.PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    binder.addClaim,
			UpdateFunc: binder.updateClaim,
			// No DeleteFunc is needed: a claim requires no clean-up;
			// syncVolume handles the missing claim.
		},
	)

	binder.claimController = claimController
	binder.volumeController = volumeController

	return binder
}
Example No. 17
func doServiceAccountAPIRequests(t *testing.T, c *client.Client, ns string, authenticated bool, canRead bool, canWrite bool) {
	testSecret := &api.Secret{
		ObjectMeta: api.ObjectMeta{Name: "testSecret"},
		Data:       map[string][]byte{"test": []byte("data")},
	}

	readOps := []testOperation{
		func() error { _, err := c.Secrets(ns).List(labels.Everything(), fields.Everything()); return err },
		func() error { _, err := c.Pods(ns).List(labels.Everything(), fields.Everything()); return err },
	}
	writeOps := []testOperation{
		func() error { _, err := c.Secrets(ns).Create(testSecret); return err },
		func() error { return c.Secrets(ns).Delete(testSecret.Name) },
	}

	for _, op := range readOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canRead && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canRead && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}

	for _, op := range writeOps {
		err := op()
		unauthorizedError := errors.IsUnauthorized(err)
		forbiddenError := errors.IsForbidden(err)

		switch {
		case !authenticated && !unauthorizedError:
			t.Fatalf("expected unauthorized error, got %v", err)
		case authenticated && unauthorizedError:
			t.Fatalf("unexpected unauthorized error: %v", err)
		case authenticated && canWrite && forbiddenError:
			t.Fatalf("unexpected forbidden error: %v", err)
		case authenticated && !canWrite && !forbiddenError:
			t.Fatalf("expected forbidden error, got: %v", err)
		}
	}
}
Example No. 18
// NewServiceAccountsController returns a new *ServiceAccountsController.
func NewServiceAccountsController(cl client.Interface, options ServiceAccountsControllerOptions) *ServiceAccountsController {
	e := &ServiceAccountsController{
		client: cl,
		names:  options.Names,
	}

	accountSelector := fields.Everything()
	if len(options.Names) == 1 {
		// If we're maintaining a single account, we can scope the accounts we watch to just that name
		accountSelector = fields.SelectorFromSet(map[string]string{client.ObjectNameField: options.Names.List()[0]})
	}
	e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), accountSelector)
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), accountSelector, rv)
			},
		},
		&api.ServiceAccount{},
		options.ServiceAccountResync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.serviceAccountDeleted,
		},
		cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
	)

	e.namespaces, e.namespaceController = framework.NewIndexerInformer(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return e.client.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(rv string) (watch.Interface, error) {
				return e.client.Namespaces().Watch(labels.Everything(), fields.Everything(), rv)
			},
		},
		&api.Namespace{},
		options.NamespaceResync,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    e.namespaceAdded,
			UpdateFunc: e.namespaceUpdated,
		},
		cache.Indexers{"name": nameIndexFunc},
	)

	return e
}
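nameIndexFunc is referenced above but not defined; a plausible implementation (an assumption) indexes namespaces by their metadata name, matching the cache.IndexFunc shape used for the "namespace" index:

func nameIndexFunc(obj interface{}) ([]string, error) {
	namespace, ok := obj.(*api.Namespace)
	if !ok {
		return nil, fmt.Errorf("expected *api.Namespace, got %T", obj)
	}
	return []string{namespace.Name}, nil
}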
Example No. 19
func TestBootstrapPolicyOverwritePolicyCommand(t *testing.T) {
	masterConfig, clusterAdminKubeConfig, err := testutil.StartTestMaster()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	client, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := client.ClusterPolicies().Delete(authorizationapi.PolicyName); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	// after the policy is deleted, we must wait for it to be cleared from the policy cache
	err = wait.Poll(10*time.Millisecond, 10*time.Second, func() (bool, error) {
		_, err := client.ClusterPolicies().List(labels.Everything(), fields.Everything())
		if err == nil {
			return false, nil
		}
		if !kapierror.IsForbidden(err) {
			t.Errorf("unexpected error: %v", err)
		}
		return true, nil
	})
	if err != nil {
		t.Errorf("timeout: %v", err)
	}

	etcdClient, err := etcd.GetAndTestEtcdClient(masterConfig.EtcdClientInfo)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	etcdHelper, err := origin.NewEtcdStorage(etcdClient, masterConfig.EtcdStorageConfig.OpenShiftStorageVersion, masterConfig.EtcdStorageConfig.OpenShiftStoragePrefix)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if err := admin.OverwriteBootstrapPolicy(etcdHelper, masterConfig.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if _, err := client.ClusterPolicies().List(labels.Everything(), fields.Everything()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
Example No. 20
func TestServiceRegistryList(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, registry := NewTestREST(t, nil)
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar": "baz"},
		},
	})
	registry.CreateService(ctx, &api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo2", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
			Selector: map[string]string{"bar2": "baz2"},
		},
	})
	registry.List.ResourceVersion = "1"
	s, _ := storage.List(ctx, labels.Everything(), fields.Everything())
	sl := s.(*api.ServiceList)
	if len(sl.Items) != 2 {
		t.Fatalf("Expected 2 services, but got %v", len(sl.Items))
	}
	if e, a := "foo", sl.Items[0].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if e, a := "foo2", sl.Items[1].Name; e != a {
		t.Errorf("Expected %v, but got %v", e, a)
	}
	if sl.ResourceVersion != "1" {
		t.Errorf("Unexpected resource version: %#v", sl)
	}
}
Example No. 21
func TestEtcdListPersistentVolumeClaims(t *testing.T) {
	ctx := api.NewDefaultContext()
	registry, _, fakeClient, _ := newStorage(t)
	key := registry.KeyRootFunc(ctx)
	key = etcdtest.AddPrefix(key)
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, validNewPersistentVolumeClaim("foo", api.NamespaceDefault)),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, validNewPersistentVolumeClaim("bar", api.NamespaceDefault)),
					},
				},
			},
		},
		E: nil,
	}

	pvObj, err := registry.List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	pvs := pvObj.(*api.PersistentVolumeClaimList)

	if len(pvs.Items) != 2 || pvs.Items[0].Name != "foo" || pvs.Items[1].Name != "bar" {
		t.Errorf("Unexpected persistentVolume list: %#v", pvs)
	}
}
Example No. 22
func TestEtcdWatchController(t *testing.T) {
	ctx := api.NewDefaultContext()
	storage, fakeClient := newStorage(t)
	watching, err := storage.Watch(ctx,
		labels.Everything(),
		fields.Everything(),
		"1",
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	fakeClient.WaitForWatchCompletion()

	select {
	case _, ok := <-watching.ResultChan():
		if !ok {
			t.Errorf("watching channel should be open")
		}
	default:
	}
	fakeClient.WatchInjectError <- nil
	if _, ok := <-watching.ResultChan(); ok {
		t.Errorf("watching channel should be closed")
	}
	watching.Stop()
}
Example No. 23
func TestEtcdListControllers(t *testing.T) {
	storage, fakeClient := newStorage(t)
	ctx := api.NewDefaultContext()
	key := makeControllerListKey(ctx)
	key = etcdtest.AddPrefix(key)
	controller := validController
	controller.Name = "bar"
	fakeClient.Data[key] = tools.EtcdResponseWithError{
		R: &etcd.Response{
			Node: &etcd.Node{
				Nodes: []*etcd.Node{
					{
						Value: runtime.EncodeOrDie(latest.Codec, &validController),
					},
					{
						Value: runtime.EncodeOrDie(latest.Codec, &controller),
					},
				},
			},
		},
		E: nil,
	}
	objList, err := storage.List(ctx, labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	controllers, _ := objList.(*api.ReplicationControllerList)
	if len(controllers.Items) != 2 || controllers.Items[0].Name != validController.Name || controllers.Items[1].Name != controller.Name {
		t.Errorf("Unexpected controller list: %#v", controllers)
	}
}
Example No. 24
func (s *ServiceController) createExternalLoadBalancer(service *api.Service) error {
	ports, err := getTCPPorts(service)
	if err != nil {
		return err
	}
	nodes, err := s.kubeClient.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	name := s.loadBalancerName(service)
	if len(service.Spec.PublicIPs) > 0 {
		for _, publicIP := range service.Spec.PublicIPs {
			// TODO: Make this actually work for multiple IPs by using different
			// names for each. For now, we'll just create the first and break.
			endpoint, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, net.ParseIP(publicIP),
				ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
			if err != nil {
				return err
			}
			service.Spec.PublicIPs = []string{endpoint}
			break
		}
	} else {
		endpoint, err := s.balancer.CreateTCPLoadBalancer(name, s.zone.Region, nil,
			ports, hostsFromNodeList(nodes), service.Spec.SessionAffinity)
		if err != nil {
			return err
		}
		service.Spec.PublicIPs = []string{endpoint}
	}
	return nil
}
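hostsFromNodeList is used above but not shown; a plausible implementation (an assumption) collects the names of the nodes the balancer should target:

func hostsFromNodeList(list *api.NodeList) []string {
	hosts := make([]string, 0, len(list.Items))
	for i := range list.Items {
		hosts = append(hosts, list.Items[i].Name)
	}
	return hosts
}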
Example No. 25
// afterEach deletes the namespace, after reading its events.
func (f *Framework) afterEach() {
	// Print events if the test failed.
	if CurrentGinkgoTestDescription().Failed {
		By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))
		events, err := f.Client.Events(f.Namespace.Name).List(labels.Everything(), fields.Everything())
		Expect(err).NotTo(HaveOccurred())

		for _, e := range events.Items {
			Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
		}
		// Note that we don't wait for any cleanup to propagate, which means
		// that if you delete a bunch of pods right before ending your test,
		// you may or may not see the killing/deletion/cleanup events.
	}

	// Check whether all nodes are ready after the test.
	if err := allNodesReady(f.Client, time.Minute); err != nil {
		Failf("All nodes should be ready after test, %v", err)
	}

	By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name))
	if err := f.Client.Namespaces().Delete(f.Namespace.Name); err != nil {
		Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err)
	}
	// Paranoia-- prevent reuse!
	f.Namespace = nil
	f.Client = nil
}
Example No. 26
func (s *storage) ListNamespaces(ctx api.Context, label labels.Selector) (*api.NamespaceList, error) {
	obj, err := s.List(ctx, label, fields.Everything())
	if err != nil {
		return nil, err
	}
	return obj.(*api.NamespaceList), nil
}
Example No. 27
func (d *NodeDescriber) Describe(namespace, name string) (string, error) {
	mc := d.Nodes()
	node, err := mc.Get(name)
	if err != nil {
		return "", err
	}

	var pods []*api.Pod
	allPods, err := d.Pods(namespace).List(labels.Everything(), fields.Everything())
	if err != nil {
		return "", err
	}
	for i := range allPods.Items {
		pod := &allPods.Items[i]
		if pod.Spec.NodeName != name {
			continue
		}
		pods = append(pods, pod)
	}

	var events *api.EventList
	if ref, err := api.GetReference(node); err != nil {
		glog.Errorf("Unable to construct reference to '%#v': %v", node, err)
	} else {
		// TODO: We haven't decided the namespace for Node object yet.
		ref.UID = types.UID(ref.Name)
		events, _ = d.Events("").Search(ref)
	}

	return describeNode(node, pods, events)
}
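The node describer filters pods client-side after listing every pod in the namespace. The same filter could be pushed to the server with a field selector, assuming the pods endpoint of this API version supports selecting on spec.nodeName:

func podsOnNode(d *NodeDescriber, namespace, name string) (*api.PodList, error) {
	nodeSelector := fields.SelectorFromSet(map[string]string{"spec.nodeName": name})
	return d.Pods(namespace).List(labels.Everything(), nodeSelector)
}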
Example No. 28
func TestListRoutesPopulatedList(t *testing.T) {
	mockRegistry := test.NewRouteRegistry()
	mockAllocator := ractest.NewTestRouteAllocationController()
	mockRegistry.Routes = &api.RouteList{
		Items: []api.Route{
			{
				ObjectMeta: kapi.ObjectMeta{
					Name: "foo",
				},
			},
			{
				ObjectMeta: kapi.ObjectMeta{
					Name: "bar",
				},
			},
		},
	}

	storage := REST{
		registry:  mockRegistry,
		allocator: mockAllocator,
	}

	list, err := storage.List(kapi.NewDefaultContext(), labels.Everything(), fields.Everything())
	if err != nil {
		t.Errorf("Unexpected non-nil error: %#v", err)
	}

	routes := list.(*api.RouteList)

	if e, a := 2, len(routes.Items); e != a {
		t.Errorf("Expected %v, got %v", e, a)
	}
}
Example No. 29
func (s *storage) ListClients(ctx kapi.Context, label labels.Selector) (*api.OAuthClientList, error) {
	obj, err := s.List(ctx, label, fields.Everything())
	if err != nil {
		return nil, err
	}
	return obj.(*api.OAuthClientList), nil
}
Example No. 30
func TestListMinionsLabels(t *testing.T) {
	ns := api.NamespaceNone
	labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Version())
	c := &testClient{
		Request: testRequest{
			Method: "GET",
			Path:   testapi.ResourcePath(getNodesResourceName(), "", ""),
			Query:  buildQueryValues(ns, url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})},
		Response: Response{
			StatusCode: 200,
			Body: &api.NodeList{
				Items: []api.Node{
					{
						ObjectMeta: api.ObjectMeta{
							Labels: map[string]string{
								"foo":  "bar",
								"name": "baz",
							},
						},
					},
				},
			},
		},
	}
	c.Setup()
	c.QueryValidator[labelSelectorQueryParamName] = validateLabels
	selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector()
	receivedNodeList, err := c.Nodes().List(selector, fields.Everything())
	c.Validate(t, receivedNodeList, err)
}