Example #1
// NewClient returns a usable Client. Don't forget to Stop it.
func NewClient(addr string, resyncPeriod time.Duration) (Client, error) {
	c, err := unversioned.New(&unversioned.Config{Host: addr})
	if err != nil {
		return nil, err
	}

	podListWatch := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.Everything())
	podStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	podReflector := cache.NewReflector(podListWatch, &api.Pod{}, podStore, resyncPeriod)

	serviceListWatch := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
	serviceStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
	serviceReflector := cache.NewReflector(serviceListWatch, &api.Service{}, serviceStore, resyncPeriod)

	quit := make(chan struct{})
	podReflector.RunUntil(quit)
	serviceReflector.RunUntil(quit)

	return &client{
		quit:             quit,
		client:           c,
		podReflector:     podReflector,
		podStore:         &cache.StoreToPodLister{Store: podStore},
		serviceReflector: serviceReflector,
		serviceStore:     &cache.StoreToServiceLister{Store: serviceStore},
	}, nil
}
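
The client struct and its Stop method aren't shown here. Since both reflectors run only until quit is closed, a minimal sketch of Stop (an assumption based on the constructor above, not code from the source) would be:

// Hypothetical sketch: closing the shared quit channel stops both RunUntil loops.
func (c *client) Stop() {
	close(c.quit)
}
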
Example #2
func NewCache() *Cache {
	return &Cache{
		ingress:   cache.NewStore(cache.MetaNamespaceKeyFunc),
		service:   cache.NewStore(cache.MetaNamespaceKeyFunc),
		endpoints: cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
}
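
Every store above is keyed by cache.MetaNamespaceKeyFunc, which maps a namespaced object to "<namespace>/<name>". A minimal usage sketch (the "demo"/"web" names are illustrative):

store := cache.NewStore(cache.MetaNamespaceKeyFunc)
// MetaNamespaceKeyFunc keys namespaced objects as "<namespace>/<name>".
_ = store.Add(&api.Service{ObjectMeta: api.ObjectMeta{Name: "web", Namespace: "demo"}})
if obj, exists, err := store.GetByKey("demo/web"); err == nil && exists {
	svc := obj.(*api.Service) // stores hold interface{}, so callers type-assert on the way out
	_ = svc
}
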
Example #3
// NewAuthorizationCache creates a new AuthorizationCache
func NewAuthorizationCache(reviewer Reviewer, namespaceInterface kclient.NamespaceInterface,
	clusterPolicyLister client.SyncedClusterPoliciesListerInterface, clusterPolicyBindingLister client.SyncedClusterPolicyBindingsListerInterface,
	policyNamespacer client.SyncedPoliciesListerNamespacer, policyBindingNamespacer client.SyncedPolicyBindingsListerNamespacer,
) *AuthorizationCache {

	result := &AuthorizationCache{
		allKnownNamespaces:        sets.String{},
		namespaceStore:            cache.NewStore(cache.MetaNamespaceKeyFunc),
		namespaceInterface:        namespaceInterface,
		lastSyncResourceVersioner: &unchangingLastSyncResourceVersioner{},

		clusterPolicyResourceVersions:  sets.NewString(),
		clusterBindingResourceVersions: sets.NewString(),

		clusterPolicyLister:             clusterPolicyLister,
		clusterPolicyBindingLister:      clusterPolicyBindingLister,
		policyNamespacer:                policyNamespacer,
		policyBindingNamespacer:         policyBindingNamespacer,
		policyLastSyncResourceVersioner: unionLastSyncResourceVersioner{clusterPolicyLister, clusterPolicyBindingLister, policyNamespacer, policyBindingNamespacer},

		reviewRecordStore:       cache.NewStore(reviewRecordKeyFn),
		userSubjectRecordStore:  cache.NewStore(subjectRecordKeyFn),
		groupSubjectRecordStore: cache.NewStore(subjectRecordKeyFn),

		reviewer: reviewer,
		skip:     &neverSkipSynchronizer{},

		watchers: []CacheWatcher{},
	}
	result.syncHandler = result.syncRequest
	return result
}
Example #4
func TestDefaultResourceFromIngress(te *testing.T) {
	var (
		is  = assert.New(te)
		m   = NewCache()
		ing = &extensions.Ingress{
			ObjectMeta: api.ObjectMeta{Name: "ingress", Namespace: "test", UID: types.UID("one")},
			Spec: extensions.IngressSpec{
				Backend: &extensions.IngressBackend{
					ServiceName: "service",
					ServicePort: intstr.FromString("web"),
				},
			},
		}
		svc = &api.Service{
			ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", UID: types.UID("two")},
			Spec: api.ServiceSpec{
				Type:      api.ServiceTypeClusterIP,
				ClusterIP: "1.2.3.4",
				Ports: []api.ServicePort{
					{Name: "web", Port: 80, TargetPort: intstr.FromString("http")},
				},
			},
		}
		end = &api.Endpoints{
			ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", UID: types.UID("three")},
			Subsets: []api.EndpointSubset{
				{
					Addresses: []api.EndpointAddress{
						{IP: "10.11.12.13"},
						{IP: "10.20.21.23"},
					},
					Ports: []api.EndpointPort{
						{Name: "web", Port: 8080, Protocol: api.ProtocolTCP},
					},
				},
			},
		}
	)

	if testing.Verbose() {
		logger.Configure("debug", "[romulus-test] ", os.Stdout)
		defer logger.SetLevel("error")
	}

	m.SetServiceStore(cache.NewStore(cache.MetaNamespaceKeyFunc))
	m.SetEndpointsStore(cache.NewStore(cache.MetaNamespaceKeyFunc))
	m.endpoints.Add(end)
	m.service.Add(svc)

	list := resourcesFromIngress(m, ing)
	te.Logf("Default ResourceList: %v", list)
	is.True(len(list) > 0, "ResourceList should be non-zero")
	ma := list.Map()
	rsc, ok := ma["test.service.web"]
	if is.True(ok, "'test.service.web' not created: %v", list) {
		is.False(rsc.NoServers(), "%v should have servers", rsc)
	}
}
Example #5
func newKube2Sky(ec etcdClient) *kube2sky {
	return &kube2sky{
		etcdClient:          ec,
		domain:              testDomain,
		etcdMutationTimeout: time.Second,
		endpointsStore:      cache.NewStore(cache.MetaNamespaceKeyFunc),
		servicesStore:       cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
}
Example #6
func TestModeler(t *testing.T) {
	table := []struct {
		queuedPods    []*api.Pod
		scheduledPods []*api.Pod
		assumedPods   []*api.Pod
		expectPods    names
	}{
		{
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}, {"custom", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{{"custom", "foo"}}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}},
		},
	}

	for _, item := range table {
		q := &cache.StoreToPodLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.queuedPods {
			q.Store.Add(pod)
		}
		s := &cache.StoreToPodLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.scheduledPods {
			s.Store.Add(pod)
		}
		m := NewSimpleModeler(q, s)
		for _, pod := range item.assumedPods {
			m.AssumePod(pod)
		}

		list, err := m.PodLister().List(labels.Everything())
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}

		found := 0
		for _, pod := range list {
			if item.expectPods.has(pod) {
				found++
			} else {
				t.Errorf("found unexpected pod %#v", pod)
			}
		}
		if e, a := item.expectPods, found; len(e) != a {
			t.Errorf("Expected pods:\n%+v\nFound pods:\n%s\n", podNames(e.list()), podNames(list))
		}
	}
}
Example #7
func newKubeDNS() *KubeDNS {
	kd := &KubeDNS{
		domain:         testDomain,
		endpointsStore: cache.NewStore(cache.MetaNamespaceKeyFunc),
		servicesStore:  cache.NewStore(cache.MetaNamespaceKeyFunc),
		cache:          NewTreeCache(),
		cacheLock:      sync.RWMutex{},
		domainPath:     reverseArray(strings.Split(strings.TrimRight(testDomain, "."), ".")),
		nodesStore:     cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	return kd
}
Example #8
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffinitySymmetricWeight int, failureDomains string) *ConfigFactory {
	stopEverything := make(chan struct{})
	schedulerCache := schedulercache.New(30*time.Second, stopEverything)

	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:                     &cache.StoreToNodeLister{},
		PVLister:                       &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		PVCLister:                      &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:                  &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister:               &cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
		ReplicaSetLister:               &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		schedulerCache:                 schedulerCache,
		StopEverything:                 stopEverything,
		SchedulerName:                  schedulerName,
		HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
		FailureDomains:                 failureDomains,
	}

	c.PodLister = schedulerCache

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = framework.NewIndexerInformer(
		c.createAssignedNonTerminatedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    c.addPodToCache,
			UpdateFunc: c.updatePodInCache,
			DeleteFunc: c.deletePodFromCache,
		},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)

	c.NodeLister.Store, c.nodePopulator = framework.NewInformer(
		c.createNodeLW(),
		&api.Node{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    c.addNodeToCache,
			UpdateFunc: c.updateNodeInCache,
			DeleteFunc: c.deleteNodeFromCache,
		},
	)

	return c
}
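
Because the scheduled-pod informer registers a namespace index, the resulting Indexer can answer per-namespace queries without scanning the whole store. A hedged sketch (the "demo" namespace is illustrative):

// ByIndex consults the registered NamespaceIndex rather than listing everything;
// it returns the matching objects as a []interface{}.
pods, err := c.ScheduledPodLister.Indexer.ByIndex(cache.NamespaceIndex, "demo")
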
Example #9
func newKubeDNS() *KubeDNS {
	kd := &KubeDNS{
		domain:              testDomain,
		endpointsStore:      cache.NewStore(cache.MetaNamespaceKeyFunc),
		servicesStore:       cache.NewStore(cache.MetaNamespaceKeyFunc),
		cache:               NewTreeCache(),
		reverseRecordMap:    make(map[string]*skymsg.Service),
		clusterIPServiceMap: make(map[string]*kapi.Service),
		cacheLock:           sync.RWMutex{},
		domainPath:          reverseArray(strings.Split(strings.TrimRight(testDomain, "."), ".")),
		nodesStore:          cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	return kd
}
Example #10
func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
	ps := &pdbStates{}

	dc := &DisruptionController{
		pdbLister:  cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
		podLister:  cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
		rcLister:   cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
		rsLister:   cache.StoreToReplicaSetLister{Store: cache.NewStore(controller.KeyFunc)},
		dLister:    cache.StoreToDeploymentLister{Store: cache.NewStore(controller.KeyFunc)},
		getUpdater: func() updater { return ps.Set },
	}

	return dc, ps
}
Example #11
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client, rateLimiter util.RateLimiter, schedulerName string) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{},
		// Only nodes in the "Ready" condition with status == "True" are schedulable
		NodeLister:       &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		PVLister:         &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		PVCLister:        &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:    &cache.StoreToServiceLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ControllerLister: &cache.StoreToReplicationControllerLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
		StopEverything:   make(chan struct{}),
		SchedulerName:    schedulerName,
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{Store: c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	c.BindPodsRateLimiter = rateLimiter

	// On add/delete to the scheduled pods, remove from the assumed pods.
	// We construct this here instead of in CreateFromKeys because
	// ScheduledPodLister is something we provide to plug in functions that
	// they may need to call.
	c.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(
		c.createAssignedNonTerminatedPodLW(),
		&api.Pod{},
		0,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				if pod, ok := obj.(*api.Pod); ok {
					c.modeler.LockedAction(func() {
						c.modeler.ForgetPod(pod)
					})
				}
			},
			DeleteFunc: func(obj interface{}) {
				c.modeler.LockedAction(func() {
					switch t := obj.(type) {
					case *api.Pod:
						c.modeler.ForgetPod(t)
					case cache.DeletedFinalStateUnknown:
						c.modeler.ForgetPodByKey(t.Key)
					}
				})
			},
		},
	)

	return c
}
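
The type switch in DeleteFunc above is not decorative: when an informer's watch is interrupted and the final delete event is missed, the framework delivers a cache.DeletedFinalStateUnknown tombstone instead of the object itself, so the modeler must also be able to forget a pod by key alone.
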
Example #12
// Create constructs a BuildPodController
func (factory *BuildPodControllerFactory) Create() controller.RunnableController {
	factory.buildStore = cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildLW{client: factory.OSClient}, &buildapi.Build{}, factory.buildStore, 2*time.Minute).RunUntil(factory.Stop)

	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&podLW{client: factory.KubeClient}, &kapi.Pod{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	client := ControllerClient{factory.KubeClient, factory.OSClient}
	buildPodController := &buildcontroller.BuildPodController{
		BuildStore:   factory.buildStore,
		BuildUpdater: factory.BuildUpdater,
		SecretClient: factory.KubeClient,
		PodManager:   client,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("BuildPod", nil),
			flowcontrol.NewTokenBucketRateLimiter(1, 10)),
		Handle: func(obj interface{}) error {
			pod := obj.(*kapi.Pod)
			return buildPodController.HandlePod(pod)
		},
	}
}
Example #13
// NewEventStore creates a new Store which triggers watch whenever
// an object is added, removed or updated.
func NewEventStore(watch Watch, keyFunc cache.KeyFunc) cache.Store {
	return &eventStore{
		keyFunc: keyFunc,
		watch:   watch,
		Store:   cache.NewStore(keyFunc),
	}
}
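
The eventStore type itself isn't shown. A hypothetical sketch of the pattern, assuming the local Watch interface exposes some notification hook (Trigger below is invented for illustration): each mutating method delegates to the embedded Store, then fires the watch.

// Hypothetical: delegate to the embedded Store, then notify the watcher.
func (s *eventStore) Add(obj interface{}) error {
	if err := s.Store.Add(obj); err != nil {
		return err
	}
	key, err := s.keyFunc(obj)
	if err != nil {
		return err
	}
	s.watch.Trigger(key) // Trigger is a hypothetical method on the local Watch interface
	return nil
}
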
Example #14
func newPodsApi(client *kclient.Client) podsApi {
	// Note: the selector could be extended to include specific nodes to monitor,
	// or an API could be provided to update the set of nodes to monitor.
	// "spec.nodeName!=" matches only pods that are already bound to a node.
	selector, err := kSelector.ParseSelector("spec.nodeName!=")
	if err != nil {
		panic(err)
	}

	lw := kcache.NewListWatchFromClient(client, "pods", kapi.NamespaceAll, selector)
	podLister := &kcache.StoreToPodLister{Store: kcache.NewStore(kcache.MetaNamespaceKeyFunc)}
	// Watch and cache all running pods.
	reflector := kcache.NewReflector(lw, &kapi.Pod{}, podLister.Store, 0)
	stopChan := make(chan struct{})
	reflector.RunUntil(stopChan)
	nStore, nController := kframework.NewInformer(
		createNamespaceLW(client),
		&kapi.Namespace{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{})
	go nController.Run(util.NeverStop)

	podsApi := &realPodsApi{
		client:         client,
		podLister:      podLister,
		stopChan:       stopChan,
		reflector:      reflector,
		namespaceStore: nStore,
	}

	return podsApi
}
Example #15
// newPlugin creates a new admission plugin.
func newPlugin(kclient clientset.Interface) *claimDefaulterPlugin {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return kclient.Storage().StorageClasses().List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return kclient.Storage().StorageClasses().Watch(internalOptions)
			},
		},
		&storage.StorageClass{},
		store,
		0,
	)

	return &claimDefaulterPlugin{
		Handler:   admission.NewHandler(admission.Create),
		client:    kclient,
		store:     store,
		reflector: reflector,
	}
}
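
Note that newPlugin only wires the reflector up; nothing here starts it. Compare Example #21 below, where the SetInternalClientSet variant builds the same list/watch and then calls a.Run() once a client is present, which is presumably what sets the reflector in motion.
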
Example #16
// TestAdmissionNamespaceExists verifies that no client call is made when a namespace already exists
func TestAdmissionNamespaceExists(t *testing.T) {
	namespace := "test"
	mockClient := &fake.Clientset{}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(&api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: namespace},
	})
	handler := &provision{
		client: mockClient,
		store:  store,
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	if len(mockClient.Actions()) != 0 {
		t.Errorf("No client request should have been made")
	}
}
Example #17
// TestAdmission verifies a namespace is created on create requests for namespace managed resources
func TestAdmission(t *testing.T) {
	namespace := "test"
	mockClient := &fake.Clientset{}
	handler := &provision{
		client: mockClient,
		store:  cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	actions := mockClient.Actions()
	if len(actions) != 1 {
		t.Errorf("Expected a create-namespace request")
	}
	if !actions[0].Matches("create", "namespaces") {
		t.Errorf("Expected a create-namespace request to be made via the client")
	}
}
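
Taken together with Example #16, the two tests bracket the provision plugin's behavior: with the namespace already in the store, no client call is made; with an empty store, the admission handler issues exactly one create-namespace request.
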
Example #18
func testPSPAdmit(testCaseName string, psps []*extensions.PodSecurityPolicy, pod *kapi.Pod, shouldPass bool, expectedPSP string, t *testing.T) {
	namespace := createNamespaceForTest()
	serviceAccount := createSAForTest()
	tc := clientsetfake.NewSimpleClientset(namespace, serviceAccount)
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	for _, psp := range psps {
		store.Add(psp)
	}

	plugin := NewTestAdmission(store, tc)

	attrs := kadmission.NewAttributesRecord(pod, nil, kapi.Kind("Pod").WithVersion("version"), "namespace", "", kapi.Resource("pods").WithVersion("version"), "", kadmission.Create, &user.DefaultInfo{})
	err := plugin.Admit(attrs)

	if shouldPass && err != nil {
		t.Errorf("%s expected no errors but received %v", testCaseName, err)
	}

	if shouldPass && err == nil {
		if pod.Annotations[psputil.ValidatedPSPAnnotation] != expectedPSP {
			t.Errorf("%s expected to validate under %s but found %s", testCaseName, expectedPSP, pod.Annotations[psputil.ValidatedPSPAnnotation])
		}
	}

	if !shouldPass && err == nil {
		t.Errorf("%s expected errors but received none", testCaseName)
	}
}
Example #19
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewResyncableFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).RunUntil(factory.Stop)

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).RunUntil(factory.Stop)

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			retryFunc("ImageStream update", func(err error) bool {
				_, isFatal := err.(buildcontroller.ImageChangeControllerFatalError)
				return isFatal
			}),
			flowcontrol.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example #20
func getNodeLister(kubeClient *kube_client.Client) (*cache.StoreToNodeLister, error) {
	lw := cache.NewListWatchFromClient(kubeClient, "nodes", kube_api.NamespaceAll, fields.Everything())
	nodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
	reflector := cache.NewReflector(lw, &kube_api.Node{}, nodeLister.Store, time.Hour)
	reflector.Run()
	return nodeLister, nil
}
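
getNodeLister returns as soon as the reflector goroutine starts, so the lister may still be empty until the initial list completes. A hedged sketch of waiting for the first sync (the interval, timeout, and "at least one node" condition are illustrative, and wait.Poll is assumed to be available in this code's vintage):

// Illustrative: poll until the reflector has populated at least one node.
err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
	return len(nodeLister.Store.List()) > 0, nil
})
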
Example #21
func (a *claimDefaulterPlugin) SetInternalClientSet(client internalclientset.Interface) {
	a.client = client
	a.store = cache.NewStore(cache.MetaNamespaceKeyFunc)
	a.reflector = cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return client.Storage().StorageClasses().List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return client.Storage().StorageClasses().Watch(internalOptions)
			},
		},
		&storage.StorageClass{},
		a.store,
		0,
	)

	if client != nil {
		a.Run()
	}
}
Example #22
// NewPlugin creates a new PSP admission plugin.
func NewPlugin(kclient clientset.Interface, strategyFactory psp.StrategyFactory, pspMatcher PSPMatchFn, failOnNoPolicies bool) *podSecurityPolicyPlugin {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return kclient.Extensions().PodSecurityPolicies().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return kclient.Extensions().PodSecurityPolicies().Watch(options)
			},
		},
		&extensions.PodSecurityPolicy{},
		store,
		0,
	)

	return &podSecurityPolicyPlugin{
		Handler:          admission.NewHandler(admission.Create, admission.Update),
		client:           kclient,
		strategyFactory:  strategyFactory,
		pspMatcher:       pspMatcher,
		failOnNoPolicies: failOnNoPolicies,

		store:     store,
		reflector: reflector,
	}
}
Example #23
func newKube2Vulcand(ec etcdClient) *kube2vulcand {
	return &kube2vulcand{
		etcdClient:          ec,
		etcdMutationTimeout: time.Second,
		ingressesStore:      cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
}
Example #24
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
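
The zero-argument ListFunc and the resourceVersion-taking WatchFunc mark this as an older client API; later snippets in this set (Examples #15, #22, and #25) show the same cache.ListWatch pattern after the signatures moved to a single options parameter.
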
Example #25
// NewAcceptNewlyObservedReadyPods makes a new AcceptNewlyObservedReadyPods
// from a real client.
func NewAcceptNewlyObservedReadyPods(
	out io.Writer,
	kclient kcoreclient.PodsGetter,
	timeout time.Duration,
	interval time.Duration,
	minReadySeconds int32,
) *AcceptNewlyObservedReadyPods {

	return &AcceptNewlyObservedReadyPods{
		out:             out,
		timeout:         timeout,
		interval:        interval,
		minReadySeconds: minReadySeconds,
		acceptedPods:    sets.NewString(),
		getDeploymentPodStore: func(deployment *kapi.ReplicationController) (cache.Store, chan struct{}) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &cache.ListWatch{
				ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
					options.LabelSelector = selector
					return kclient.Pods(deployment.Namespace).List(options)
				},
				WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
					options.LabelSelector = selector
					return kclient.Pods(deployment.Namespace).Watch(options)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
Example #26
// TestHandle_runningPod ensures that a running deployer pod results in a
// transition of the deployment's status to running.
func TestHandle_runningPod(t *testing.T) {
	deployment, _ := deployutil.MakeDeployment(deploytest.OkDeploymentConfig(1), kapi.Codecs.LegacyCodec(deployapi.SchemeGroupVersion))
	deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(deployapi.DeploymentStatusPending)
	var updatedDeployment *kapi.ReplicationController

	kFake := &ktestclient.Fake{}
	kFake.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		return true, deployment, nil
	})
	kFake.PrependReactor("update", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
		updatedDeployment = deployment
		return true, deployment, nil
	})

	controller := &DeployerPodController{
		store:   cache.NewStore(cache.MetaNamespaceKeyFunc),
		kClient: kFake,
	}

	err := controller.Handle(runningPod(deployment))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if updatedDeployment == nil {
		t.Fatalf("expected deployment update")
	}

	if e, a := deployapi.DeploymentStatusRunning, deployutil.DeploymentStatusFor(updatedDeployment); e != a {
		t.Fatalf("expected updated deployment status %s, got %s", e, a)
	}
}
Example #27
func NewKubeletProvider(uri *url.URL) (MetricsSourceProvider, error) {
	// create clients
	kubeConfig, kubeletConfig, err := GetKubeConfigs(uri)
	if err != nil {
		return nil, err
	}
	kubeClient := kube_client.NewOrDie(kubeConfig)
	kubeletClient, err := NewKubeletClient(kubeletConfig)
	if err != nil {
		return nil, err
	}

	// Get nodes to test if the client is configured well. Watch gives less error information.
	if _, err := kubeClient.Nodes().List(kube_api.ListOptions{
		LabelSelector: labels.Everything(),
		FieldSelector: fields.Everything()}); err != nil {
		glog.Errorf("Failed to load nodes: %v", err)
	}

	// watch nodes
	lw := cache.NewListWatchFromClient(kubeClient, "nodes", kube_api.NamespaceAll, fields.Everything())
	nodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
	reflector := cache.NewReflector(lw, &kube_api.Node{}, nodeLister.Store, time.Hour)
	reflector.Run()

	return &kubeletProvider{
		nodeLister:    nodeLister,
		reflector:     reflector,
		kubeletClient: kubeletClient,
	}, nil
}
Example #28
func TestSAR(t *testing.T) {
	store := cache.NewStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))
	mockClient := &testclient.Fake{}
	mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
		return true, nil, fmt.Errorf("shouldn't get here")
	})
	projectcache.FakeProjectCache(mockClient, store, "")
	handler := &lifecycle{client: mockClient}

	tests := map[string]struct {
		kind     string
		resource string
	}{
		"subject access review": {
			kind:     "SubjectAccessReview",
			resource: "subjectaccessreviews",
		},
		"local subject access review": {
			kind:     "LocalSubjectAccessReview",
			resource: "localsubjectaccessreviews",
		},
	}

	for k, v := range tests {
		err := handler.Admit(admission.NewAttributesRecord(nil, v.kind, "foo", "name", v.resource, "", "CREATE", nil))
		if err != nil {
			t.Errorf("Unexpected error for %s returned from admission handler: %v", k, err)
		}
	}
}
Example #29
// NewFakeBackendServices creates a new fake backend services manager.
func NewFakeBackendServices() *FakeBackendServices {
	return &FakeBackendServices{
		backendServices: cache.NewStore(func(obj interface{}) (string, error) {
			svc := obj.(*compute.BackendService)
			return svc.Name, nil
		}),
	}
}
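
This is the one example here that keys a store by something other than namespace/name: any func(obj interface{}) (string, error) satisfies cache.KeyFunc, so the fake can look backend services up by their compute name alone.
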
Example #30
func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
	ps := &pdbStates{}

	dc := &DisruptionController{
		pdbLister:   cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
		podLister:   cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
		rcLister:    cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
		rsLister:    cache.StoreToReplicaSetLister{Store: cache.NewStore(controller.KeyFunc)},
		dLister:     cache.StoreToDeploymentLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
		getUpdater:  func() updater { return ps.Set },
		broadcaster: record.NewBroadcaster(),
	}

	dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "disruption_test"})

	return dc, ps
}