Example 1
// Run starts the controller and blocks until stopCh is closed
func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Start controllers (to fill stores, call informers, fill work queues)
	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)

	// Wait for stores to fill
	for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}

	// Spawn workers to process work queues
	for i := 0; i < workers; i++ {
		go wait.Until(e.syncServiceAccount, 0, stopCh)
		go wait.Until(e.syncSecret, 0, stopCh)
	}

	// Block until stop channel is closed
	<-stopCh

	// Shut down queues
	e.syncServiceAccountQueue.ShutDown()
	e.syncSecretQueue.ShutDown()
}
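All of the Run methods in these examples rely on the same wait.Until contract: the supplied function is invoked repeatedly, separated by the given period, until the stop channel is closed (wait.NeverStop is simply a channel that is never closed). Below is a minimal, self-contained sketch of that pattern; the worker body is purely illustrative, and the import path assumes the apimachinery layout (older trees vendor the same package as k8s.io/kubernetes/pkg/util/wait).
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Invoke the worker roughly every 500ms until stopCh is closed.
	go wait.Until(func() {
		fmt.Println("worker tick")
	}, 500*time.Millisecond, stopCh)

	// Simulate the controller's lifetime, then signal shutdown by closing the channel.
	time.Sleep(2 * time.Second)
	close(stopCh)

	// Give the loop a moment to observe the closed channel before the process exits.
	time.Sleep(100 * time.Millisecond)
}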
Example 2
// Run begins quota controller using the specified number of workers
func (c *ClusterQuotaReconcilationController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Wait for the stores to sync before starting any work in this controller.
	ready := make(chan struct{})
	go c.waitForSyncedStores(ready, stopCh)
	select {
	case <-ready:
	case <-stopCh:
		return
	}

	// the controllers that replenish other resources to respond rapidly to state changes
	for _, replenishmentController := range c.replenishmentControllers {
		go replenishmentController.Run(stopCh)
	}

	// the workers that chug through the quota calculation backlog
	for i := 0; i < workers; i++ {
		go wait.Until(c.worker, time.Second, stopCh)
	}

	// the timer for how often we do a full recalculation across all quotas
	go wait.Until(func() { c.calculateAll() }, c.resyncPeriod, stopCh)

	<-stopCh
	glog.Infof("Shutting down ClusterQuotaReconcilationController")
	c.queue.ShutDown()
}
Example 3
func (im *realImageGCManager) Start() error {
	go wait.Until(func() {
		// On the first detection pass, leave ts as the zero time so detected images get an "unknown" (past) detection time.
		var ts time.Time
		if im.initialized {
			ts = time.Now()
		}
		err := im.detectImages(ts)
		if err != nil {
			glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
		} else {
			im.initialized = true
		}
	}, 5*time.Minute, wait.NeverStop)

	// Start a goroutine that periodically updates the image cache.
	// TODO(random-liu): Merge this with the previous loop.
	go wait.Until(func() {
		images, err := im.runtime.ListImages()
		if err != nil {
			glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
		} else {
			im.imageCache.set(images)
		}
	}, 30*time.Second, wait.NeverStop)

	return nil
}
Example 4
func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
	glog.Infof("Garbage Collector: Initializing")
	for _, monitor := range gc.monitors {
		go monitor.controller.Run(stopCh)
	}

	wait.PollInfinite(10*time.Second, func() (bool, error) {
		for _, monitor := range gc.monitors {
			if !monitor.controller.HasSynced() {
				glog.Infof("Garbage Collector: Waiting for resource monitors to be synced...")
				return false, nil
			}
		}
		return true, nil
	})
	glog.Infof("Garbage Collector: All monitored resources synced. Proceeding to collect garbage")

	// worker
	go wait.Until(gc.propagator.processEvent, 0, stopCh)

	for i := 0; i < workers; i++ {
		go wait.Until(gc.worker, 0, stopCh)
		go wait.Until(gc.orphanFinalizer, 0, stopCh)
	}
	Register()
	<-stopCh
	glog.Infof("Garbage Collector: Shutting down")
	gc.dirtyQueue.ShutDown()
	gc.orphanQueue.ShutDown()
	gc.propagator.eventQueue.ShutDown()
}
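The garbage collector above blocks on wait.PollInfinite, which keeps calling the condition function at the given interval until it returns true or a non-nil error. Here is a small sketch of that polling contract, under the same import-path assumption as above; the readiness condition is illustrative only.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ready := time.Now().Add(500 * time.Millisecond)

	// Check the condition every 100ms; PollInfinite only returns once the
	// condition reports done (or returns a non-nil error).
	wait.PollInfinite(100*time.Millisecond, func() (bool, error) {
		if time.Now().Before(ready) {
			fmt.Println("still waiting for monitors to sync...")
			return false, nil // not done yet, keep polling
		}
		return true, nil // done
	})

	fmt.Println("all monitors synced")
}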
Example 5
// Run starts a background goroutine that watches for changes to services that
// have (or had) LoadBalancers=true and ensures that they have
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if load balancers need to be updated to point to a new set of nodes.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(workers int) {
	defer runtime.HandleCrash()
	go s.serviceController.Run(wait.NeverStop)
	for i := 0; i < workers; i++ {
		go wait.Until(s.worker, time.Second, wait.NeverStop)
	}
	nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run()
	go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop)
}
Example 6
// Run begins quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rq.rqController.Run(stopCh)
	go rq.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(rq.worker, time.Second, stopCh)
	}
	go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
	glog.Infof("Shutting down ResourceQuotaController")
	rq.queue.ShutDown()
}
Example 7
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run() {
	go func() {
		defer utilruntime.HandleCrash()

		if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
			utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync..."))
			return
		}

		// Incorporate the results of node status pushed from kubelet to master.
		go wait.Until(func() {
			if err := nc.monitorNodeStatus(); err != nil {
				glog.Errorf("Error monitoring node status: %v", err)
			}
		}, nc.nodeMonitorPeriod, wait.NeverStop)

		// Managing eviction of nodes:
		// When we delete pods from a node and the node was not empty at the time,
		// we queue an eviction watcher. If we hit an error, retry deletion.
		go wait.Until(func() {
			nc.evictorLock.Lock()
			defer nc.evictorLock.Unlock()
			for k := range nc.zonePodEvictor {
				nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
					obj, exists, err := nc.nodeStore.GetByKey(value.Value)
					if err != nil {
						glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err)
					} else if !exists {
						glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
					} else {
						node, _ := obj.(*v1.Node)
						zone := utilnode.GetZoneKey(node)
						EvictionsNumber.WithLabelValues(zone).Inc()
					}

					nodeUid, _ := value.UID.(string)
					remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
					if err != nil {
						utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
						return false, 0
					}

					if remaining {
						glog.Infof("Pods awaiting deletion due to NodeController eviction")
					}
					return true, 0
				})
			}
		}, nodeEvictionPeriod, wait.NeverStop)
	}()
}
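cache.WaitForCacheSync, used at the top of the goroutine above, polls the supplied HasSynced functions and returns true once they all report synced, or false if the stop channel is closed first. A minimal sketch with a stand-in sync function follows (illustrative only; the client-go import path is an assumption of this sketch).
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

func main() {
	stopCh := make(chan struct{})
	start := time.Now()

	// Stand-in for an informer's HasSynced: reports true after one second.
	hasSynced := func() bool { return time.Since(start) > time.Second }

	if !cache.WaitForCacheSync(stopCh, hasSynced) {
		fmt.Println("stop channel closed before caches synced")
		return
	}
	fmt.Println("caches synced; safe to start workers")
}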
Example 8
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(workers int, stopCh <-chan struct{}) error {
	defer runtime.HandleCrash()
	go s.serviceController.Run(stopCh)
	go s.clusterController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(s.fedServiceWorker, time.Second, stopCh)
	}
	go wait.Until(s.clusterEndpointWorker, time.Second, stopCh)
	go wait.Until(s.clusterServiceWorker, time.Second, stopCh)
	go wait.Until(s.clusterSyncLoop, clusterSyncPeriod, stopCh)
	<-stopCh
	glog.Infof("Shutting down Federation Service Controller")
	s.queue.ShutDown()
	return nil
}
Example 9
func startKubelet(k KubeletBootstrap, podCfg *config.PodConfig, kc *KubeletConfig) {
	// start the kubelet
	go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)

	// start the kubelet server
	if kc.EnableServer {
		go wait.Until(func() {
			k.ListenAndServe(kc.Address, kc.Port, kc.TLSOptions, kc.Auth, kc.EnableDebuggingHandlers)
		}, 0, wait.NeverStop)
	}
	if kc.ReadOnlyPort > 0 {
		go wait.Until(func() {
			k.ListenAndServeReadOnly(kc.Address, kc.ReadOnlyPort)
		}, 0, wait.NeverStop)
	}
}
Example 10
func (r *ResourceCollector) Start() {
	// Get the cgroup containers for kubelet and docker
	kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
	if err != nil {
		framework.Failf("Failed to get kubelet container name in test-e2e-node resource collector: %v", err)
	}
	dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
	if err != nil {
		framework.Failf("Failed to get docker container name in test-e2e-node resource collector: %v", err)
	}
	systemContainers = map[string]string{
		stats.SystemContainerKubelet: kubeletContainer,
		stats.SystemContainerRuntime: dockerContainer,
	}

	wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
		var err error
		r.client, err = cadvisorclient.NewClient(fmt.Sprintf("http://localhost:%d/", cadvisorPort))
		if err == nil {
			return true, nil
		}
		return false, err
	})

	Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")

	r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
	r.stopCh = make(chan struct{})

	oldStatsMap := make(map[string]*cadvisorapiv2.ContainerStats)
	go wait.Until(func() { r.collectStats(oldStatsMap) }, r.pollingInterval, r.stopCh)
}
Example 11
// TestGetClusterPolicy tests that a ReadOnlyPolicyClient GetPolicy() call correctly retrieves a cluster policy
// when the namespace given is equal to the empty string
func TestGetClusterPolicy(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicy *authorizationapi.Policy
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)
	name := "uniqueClusterPolicyName"

	utilwait.Until(func() {
		clusterPolicy, err = testClient.GetPolicy(context, name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) &&
			(clusterPolicy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy using GetPolicy(): %v", err)
	case clusterPolicy == nil:
		t.Error("Policy is nil")
	case clusterPolicy.Name != name:
		t.Errorf("Expected policy.Name to be '%s', but got '%s'", name, clusterPolicy.Name)
	case clusterPolicy.Namespace != "":
		t.Errorf("Expected policy.Namespace to be '%s', but got '%s'", namespace, clusterPolicy.Namespace)
	}
}
Example 12
func (cm *containerManagerImpl) Start() error {
	// Setup the node
	if err := cm.setupNode(); err != nil {
		return err
	}
	// Don't run a background thread if there are no ensureStateFuncs.
	numEnsureStateFuncs := 0
	for _, cont := range cm.systemContainers {
		if cont.ensureStateFunc != nil {
			numEnsureStateFuncs++
		}
	}
	if numEnsureStateFuncs == 0 {
		return nil
	}

	// Run ensure state functions every minute.
	go wait.Until(func() {
		for _, cont := range cm.systemContainers {
			if cont.ensureStateFunc != nil {
				if err := cont.ensureStateFunc(cont.manager); err != nil {
					glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
				}
			}
		}
	}, time.Minute, wait.NeverStop)

	return nil
}
Example 13
func (frsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
	go frsc.replicaSetController.Run(stopCh)
	frsc.fedReplicaSetInformer.Start()
	frsc.fedPodInformer.Start()

	frsc.replicasetDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) {
		frsc.replicasetWorkQueue.Add(item.Key)
	})
	frsc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) {
		frsc.reconcileReplicaSetsOnClusterChange()
	})

	// Wait until the stores are synced to avoid an update storm at startup.
	for !frsc.isSynced() {
		time.Sleep(5 * time.Millisecond)
	}

	for i := 0; i < workers; i++ {
		go wait.Until(frsc.worker, time.Second, stopCh)
	}

	fedutil.StartBackoffGC(frsc.replicaSetBackoff, stopCh)

	<-stopCh
	glog.Infof("Shutting down ReplicaSetController")
	frsc.replicasetDeliverer.Stop()
	frsc.clusterDeliverer.Stop()
	frsc.replicasetWorkQueue.ShutDown()
	frsc.fedReplicaSetInformer.Stop()
	frsc.fedPodInformer.Stop()
}
Example 14
// TestPolicyGet tests that a Get() call to the ReadOnlyPolicyCache will retrieve the correct policy
func TestPolicyGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policy *authorizationapi.Policy
	var err error

	namespace := "namespaceTwo"
	name := "uniquePolicyName"

	utilwait.Until(func() {
		policy, err = testCache.Get(name, namespace)

		if (err == nil) &&
			(policy != nil) &&
			(policy.Name == name) &&
			(policy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policy using ReadOnlyPolicyCache: %v", err)
	case policy == nil:
		t.Error("Policy is nil")
	case policy.Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policy.Name)
	case policy.Namespace != namespace:
		t.Errorf("Expected policy namespace to be '%s', was '%s'", namespace, policy.Namespace)
	}
}
Example 15
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	utilwait.Until(func() {
		if err := c.RunOnce(); err != nil {
			utilruntime.HandleError(err)
		}
	}, c.interval, ch)
}
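Because utilwait.Until blocks, RunUntil itself does not return until ch is closed, so callers typically launch it in a goroutine and close the channel to stop the repair loop. Below is a sketch of that calling pattern using a stand-in for the Repair type (the fakeRepair name and its fields are illustrative, not from the source; import paths assumed as above).
package main

import (
	"fmt"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	utilwait "k8s.io/apimachinery/pkg/util/wait"
)

// fakeRepair stands in for the Repair type above; only the loop shape matters here.
type fakeRepair struct{ interval time.Duration }

func (c *fakeRepair) RunOnce() error {
	fmt.Println("repair pass")
	return nil
}

// RunUntil mirrors the example: run RunOnce every interval until ch is closed.
func (c *fakeRepair) RunUntil(ch chan struct{}) {
	utilwait.Until(func() {
		if err := c.RunOnce(); err != nil {
			utilruntime.HandleError(err)
		}
	}, c.interval, ch)
}

func main() {
	stop := make(chan struct{})
	r := &fakeRepair{interval: 200 * time.Millisecond}

	go r.RunUntil(stop) // RunUntil blocks, so launch it in a goroutine

	time.Sleep(time.Second)
	close(stop) // closing the channel stops the loop and lets RunUntil return
}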
Example 16
// TestClusterPolicyGet tests that a Get() call to the ReadOnlyClusterPolicyCache will retrieve the correct clusterPolicy
func TestClusterPolicyGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicy *authorizationapi.ClusterPolicy
	var err error

	name := "uniqueClusterPolicyName"

	utilwait.Until(func() {
		clusterPolicy, err = testCache.Get(name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicy using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicy == nil:
		t.Error("ClusterPolicy is nil.")
	case clusterPolicy.Name != name:
		t.Errorf("Expected clusterPolicy name to be '%s', was '%s'", name, clusterPolicy.Name)
	}
}
Example 17
func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps) {
	// start the kubelet
	go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)

	// start the kubelet server
	if kubeCfg.EnableServer {
		go wait.Until(func() {
			k.ListenAndServe(net.ParseIP(kubeCfg.Address), uint(kubeCfg.Port), kubeDeps.TLSOptions, kubeDeps.Auth, kubeCfg.EnableDebuggingHandlers)
		}, 0, wait.NeverStop)
	}
	if kubeCfg.ReadOnlyPort > 0 {
		go wait.Until(func() {
			k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
		}, 0, wait.NeverStop)
	}
}
Example 18
func (e *DockercfgController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Wait for the store to sync before starting any work in this controller.
	ready := make(chan struct{})
	go e.waitForDockerURLs(ready, stopCh)
	select {
	case <-ready:
	case <-stopCh:
		return
	}
	glog.Infof("Dockercfg secret controller initialized, starting.")

	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)
	for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}

	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}

	<-stopCh
	glog.Infof("Shutting down dockercfg secret controller")
	e.queue.ShutDown()
}
Example 19
// TestPolicyBindingList tests that a List() call for a namespace to the ReadOnlyPolicyBindingCache will return all policyBindings in that namespace
func TestPolicyBindingList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	namespace := "namespaceTwo"

	utilwait.Until(func() {
		policyBindings, err = testCache.List(nil, namespace)

		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 2:
		t.Errorf("Expected policyBindingList to have 2 items, had %d", len(policyBindings.Items))
	}
}
Example 20
// TestPolicyBindingGetRespectingNamespaces tests that a Get() call to the ReadOnlyPolicyBindingCache will retrieve the correct policy binding
// when the name is a non-unique identifier but the {name, namespace} pair is unique
func TestPolicyBindingGetRespectingNamespaces(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBinding *authorizationapi.PolicyBinding
	var err error

	namespace := "namespaceOne"
	name := "nonUniquePolicyBindingName"

	utilwait.Until(func() {
		policyBinding, err = testCache.Get(name, namespace)

		if (err == nil) &&
			(policyBinding != nil) &&
			(policyBinding.Name == name) &&
			(policyBinding.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBinding using ReadOnlyPolicyBindingCache: %v", err)
	case policyBinding == nil:
		t.Error("PolicyBinding is nil.")
	case policyBinding.Name != name:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", name, policyBinding.Name)
	case policyBinding.Namespace != namespace:
		t.Errorf("Expected policyBinding namespace to be '%s', was '%s'", namespace, policyBinding.Namespace)
	}
}
Example 21
// TestPolicyBindingListRespectingFields tests that a List() call for some namespace, filtered with a field to the ReadOnlyPolicyBindingCache
// will return all policyBindings in that namespace matching that field
func TestPolicyBindingListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	name := "uniquePolicyBindingName"
	namespace := "namespaceTwo"
	field := fields.OneTermEqualSelector("metadata.name", name)

	utilwait.Until(func() {
		policyBindings, err = testCache.List(&kapi.ListOptions{FieldSelector: field}, namespace)

		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 1) &&
			(policyBindings.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 1:
		t.Errorf("Expected policyBindingList to have 1 item, had %d", len(policyBindings.Items))
	case policyBindings.Items[0].Name != name:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", name, policyBindings.Items[0].Name)
	}
}
Example 22
func (fdc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
	go fdc.deploymentController.Run(stopCh)
	fdc.fedDeploymentInformer.Start()
	fdc.fedPodInformer.Start()

	fdc.deploymentDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) {
		fdc.deploymentWorkQueue.Add(item.Key)
	})
	fdc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) {
		fdc.reconcileDeploymentsOnClusterChange()
	})

	// Wait until the cluster is synced to prevent the update storm at the very beginning.
	for !fdc.isSynced() {
		time.Sleep(5 * time.Millisecond)
		glog.V(3).Infof("Waiting for controller to sync up")
	}

	for i := 0; i < workers; i++ {
		go wait.Until(fdc.worker, time.Second, stopCh)
	}

	fedutil.StartBackoffGC(fdc.deploymentBackoff, stopCh)

	<-stopCh
	glog.Infof("Shutting down DeploymentController")
	fdc.deploymentDeliverer.Stop()
	fdc.clusterDeliverer.Stop()
	fdc.deploymentWorkQueue.ShutDown()
	fdc.fedDeploymentInformer.Stop()
	fdc.fedPodInformer.Stop()
}
Example 23
// TestClusterPolicyListRespectingFields tests that a List() call, filtered with a field to the ReadOnlyClusterPolicyCache
// will return all clusterPolicies matching that field
func TestClusterPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	name := "uniqueClusterPolicyName"
	field := fields.OneTermEqualSelector("metadata.name", name)

	utilwait.Until(func() {
		clusterPolicies, err = testCache.List(&kapi.ListOptions{FieldSelector: field})

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 1) &&
			(clusterPolicies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList with fieldSelector using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 1:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	case clusterPolicies.Items[0].Name != name:
		t.Errorf("Expected field-selected clusterPolicy name to be '%s', was '%s'", name, clusterPolicies.Items[0].Name)
	}
}
Example 24
// TestListClusterPolicyBindings tests that a ReadOnlyPolicyClient ListPolicyBindings() call correctly lists cluster policy bindings
// when the namespace given is the empty string
func TestListClusterPolicyBindings(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicyBindings *authorizationapi.PolicyBindingList
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)

	utilwait.Until(func() {
		clusterPolicyBindings, err = testClient.ListPolicyBindings(context, nil)

		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 2) &&
			(strings.Contains(clusterPolicyBindings.Items[0].Name, "Cluster")) &&
			(strings.Contains(clusterPolicyBindings.Items[1].Name, "Cluster")) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy binding using ListPolicyBindings(): %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingsList is nil")
	case len(clusterPolicyBindings.Items) != 2:
		t.Errorf("ClusterPolicyBindingsList contains %d items, should contain 2.", len(clusterPolicyBindings.Items))
	case !strings.Contains(clusterPolicyBindings.Items[0].Name, "Cluster") || !strings.Contains(clusterPolicyBindings.Items[1].Name, "Cluster"):
		t.Error("ClusterPolicyBinding name should contain \"Cluster\", but did not.")
	}
}
Example 25
// TestClusterPolicyList tests that a List() call to the ReadOnlyClusterPolicyCache will return all clusterPolicies
func TestClusterPolicyList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	utilwait.Until(func() {
		clusterPolicies, err = testCache.List(nil)

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 2:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	}
}
Example 26
func (rc *RouteController) Run(syncPeriod time.Duration) {
	go wait.Until(func() {
		if err := rc.reconcileNodeRoutes(); err != nil {
			glog.Errorf("Couldn't reconcile node routes: %v", err)
		}
	}, syncPeriod, wait.NeverStop)
}
Example 27
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer e.queue.ShutDown()

	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)

	if !cache.WaitForCacheSync(stopCh, e.podStoreSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()

	if e.internalPodInformer != nil {
		go e.internalPodInformer.Run(stopCh)
	}

	<-stopCh
}
Example 28
// TestPolicyListNamespaceAll tests that a List() call for kapi.NamespaceAll to the ReadOnlyPolicyCache will return
// all policies in all namespaces
func TestPolicyListNamespaceAll(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	namespace := kapi.NamespaceAll

	utilwait.Until(func() {
		policies, err = testCache.List(nil, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 3) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 3:
		t.Errorf("Expected policyList to have 3 policies, had %d", len(policies.Items))
	}
}
Example 29
func (a *HorizontalController) Run(syncPeriod time.Duration) {
	go wait.Until(func() {
		if err := a.reconcileAutoscalers(); err != nil {
			glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err)
		}
	}, syncPeriod, wait.NeverStop)
}
Example 30
// Run begins watching and syncing.
func (e *quotaEvaluator) Run(workers int) {
	defer utilruntime.HandleCrash()

	for i := 0; i < workers; i++ {
		go wait.Until(e.doWork, time.Second, make(chan struct{}))
	}
}