Example no. 1
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution, *argAlignStats)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}

	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This allows the model to be activated or deactivated at runtime.
	modelDuration := 2 * *argModelResolution
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}
	go util.Until(manager.HousekeepModel, modelDuration, util.NeverStop)

	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
func (cm *containerManagerImpl) Start() error {
	// Don't run a background thread if there are no ensureStateFuncs.
	numEnsureStateFuncs := 0
	for _, cont := range cm.systemContainers {
		if cont.ensureStateFunc != nil {
			numEnsureStateFuncs++
		}
	}
	if numEnsureStateFuncs == 0 {
		return nil
	}

	// Run ensure state functions every minute.
	go util.Until(func() {
		for _, cont := range cm.systemContainers {
			if cont.ensureStateFunc != nil {
				if err := cont.ensureStateFunc(cont.manager); err != nil {
					glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
				}
			}
		}
	}, time.Minute, util.NeverStop)

	return nil
}
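
Both functions above, like every example in this collection, revolve around util.Until(f, period, stopCh): f is invoked repeatedly, roughly once per period, until stopCh is closed, and util.NeverStop is a channel that is never closed, so loops started with it run for the lifetime of the process. The following is a minimal, self-contained sketch of those assumed semantics; the until helper, the neverStop variable, and the timings are illustrative stand-ins, not the real util package.

package main

import (
	"fmt"
	"time"
)

// until is an illustrative stand-in for util.Until: call f, wait period, and
// repeat until stop is closed.
func until(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
		}
		f()
		select {
		case <-stop:
			return
		case <-time.After(period):
		}
	}
}

// neverStop mirrors util.NeverStop: a channel that is never closed, so a loop
// started with it runs until the process exits.
var neverStop = make(chan struct{})

func main() {
	stop := make(chan struct{})
	go until(func() { fmt.Println("housekeeping pass") }, 100*time.Millisecond, stop)
	time.Sleep(350 * time.Millisecond) // let the loop run a few times
	close(stop)                        // closing the channel ends the loop
	time.Sleep(50 * time.Millisecond)  // give the goroutine a moment to observe stop
}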
Example no. 3
// TestClusterPolicyListRespectingFields tests that a List() call to the ReadOnlyClusterPolicyCache,
// filtered with a field selector, will return all clusterPolicies matching that field
func TestClusterPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	name := "uniqueClusterPolicyName"
	label := labels.Everything()
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		clusterPolicies, err = testCache.List(label, field)

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 1) &&
			(clusterPolicies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList with fieldSelector using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 1:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	case clusterPolicies.Items[0].Name != name:
		t.Errorf("Expected field-selected clusterPolicy name to be '%s', was '%s'", name, clusterPolicies.Items[0].Name)
	}
}
// TestPolicyBindingListNamespaceAll tests that a List() call for kapi.NamespaceAll to the ReadOnlyPolicyBindingCache will return
// all policyBindings in all namespaces
func TestPolicyBindingListNamespaceAll(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	namespace := kapi.NamespaceAll
	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		policyBindings, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 3) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 3:
		t.Errorf("Expected policyBindingList to have 3 items, had %d", len(policyBindings.Items))
	}
}
// TestPolicyBindingGet tests that a Get() call to the ReadOnlyPolicyBindingCache will retrieve the correct policy binding
func TestPolicyBindingGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBinding *authorizationapi.PolicyBinding
	var err error

	namespace := "namespaceTwo"
	name := "uniquePolicyBindingName"

	util.Until(func() {
		policyBinding, err = testCache.Get(name, namespace)

		if (err == nil) &&
			(policyBinding != nil) &&
			(policyBinding.Name == name) &&
			(policyBinding.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBinding using ReadOnlyPolicyBindingCache: %v", err)
	case policyBinding == nil:
		t.Error("PolicyBinding is nil.")
	case policyBinding.Name != name:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", name, policyBinding.Name)
	case policyBinding.Namespace != namespace:
		t.Errorf("Expected policyBinding namespace to be '%s', was '%s'", namespace, policyBinding.Namespace)
	}
}
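
The read-only cache tests above, and the similar ones further down, all follow one idiom: poll the cache with util.Until on a 1-millisecond period, close testChannel as soon as the expected data is observed (which also stops the polling loop), and let the final switch report whichever condition never became true. Below is a condensed, hypothetical sketch of that idiom; fakeCache, pollUntil, and the item names are made up for illustration and are not part of the real test fixtures.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// fakeCache stands in for the read-only caches used in the tests above: it is
// filled asynchronously, so early Get calls fail until the data has arrived.
type fakeCache struct {
	mu    sync.Mutex
	items map[string]string
}

func (c *fakeCache) Get(name string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.items[name]; ok {
		return v, nil
	}
	return "", errors.New("not found")
}

// pollUntil mirrors the util.Until call in the tests: run f every period until
// stop is closed.
func pollUntil(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		default:
			f()
			time.Sleep(period)
		}
	}
}

func main() {
	cache := &fakeCache{items: map[string]string{}}

	// Populate the cache in the background, like the reflectors behind the real caches.
	go func() {
		time.Sleep(10 * time.Millisecond)
		cache.mu.Lock()
		cache.items["uniquePolicyBindingName"] = "policyBinding"
		cache.mu.Unlock()
	}()

	var got string
	var err error
	testChannel := make(chan struct{})

	// Poll until the expected item is observed; closing testChannel stops the loop.
	pollUntil(func() {
		got, err = cache.Get("uniquePolicyBindingName")
		if err == nil && got != "" {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	fmt.Printf("got %q, err %v\n", got, err)
}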
Example no. 6
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
	util.Until(func() {
		if err := c.UpdateKubernetesService(); err != nil {
			util.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
Example no. 7
// TestGetClusterPolicy tests that a ReadOnlyPolicyClient GetPolicy() call correctly retrieves a cluster policy
// when the namespace given is equal to the empty string
func TestGetClusterPolicy(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicy *authorizationapi.Policy
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)
	name := "uniqueClusterPolicyName"

	util.Until(func() {
		clusterPolicy, err = testClient.GetPolicy(context, name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) &&
			(clusterPolicy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy using GetPolicy(): %v", err)
	case clusterPolicy == nil:
		t.Error("Policy is nil")
	case clusterPolicy.Name != name:
		t.Errorf("Expected policy.Name to be '%s', but got '%s'", name, clusterPolicy.Name)
	case clusterPolicy.Namespace != "":
		t.Errorf("Expected policy.Namespace to be '%s', but got '%s'", namespace, clusterPolicy.Namespace)
	}
}
Example no. 8
// TestClusterPolicyGet tests that a Get() call to the ReadOnlyClusterPolicyCache will retrieve the correct clusterPolicy
func TestClusterPolicyGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicy *authorizationapi.ClusterPolicy
	var err error

	name := "uniqueClusterPolicyName"

	util.Until(func() {
		clusterPolicy, err = testCache.Get(name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicy using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicy == nil:
		t.Error("ClusterPolicy is nil.")
	case clusterPolicy.Name != name:
		t.Errorf("Expected clusterPolicy name to be '%s', was '%s'", name, clusterPolicy.Name)
	}
}
Example no. 9
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	util.Until(func() {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
	}, c.interval, ch)
}
Example no. 10
// TestClusterPolicyList tests that a List() call to the ReadOnlyClusterPolicyCache will return all clusterPolicies
func TestClusterPolicyList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		clusterPolicies, err = testCache.List(label, field)

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 2:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	}
}
Example no. 11
// TestPolicyList tests that a List() call for a namespace to the ReadOnlyPolicyCache will return all policies in that namespace
func TestPolicyList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	namespace := "namespaceTwo"
	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		policies, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 2:
		t.Errorf("Expected policyList to have 2 policies, had %d", len(policies.Items))
	}
}
Example no. 12
// TestPolicyGetRespectingNamespaces tests that a Get() call to the ReadOnlyPolicyCache will retrieve the correct policy
// when the name is not a unique identifier but the {name, namespace} pair is
func TestPolicyGetRespectingNamespaces(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policy *authorizationapi.Policy
	var err error

	namespace := "namespaceOne"
	name := "nonUniquePolicyName"

	util.Until(func() {
		policy, err = testCache.Get(name, namespace)

		if (err == nil) &&
			(policy != nil) &&
			(policy.Name == name) &&
			(policy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policy using ReadOnlyPolicyCache: %v", err)
	case policy == nil:
		t.Error("Policy is nil")
	case policy.Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policy.Name)
	case policy.Namespace != namespace:
		t.Errorf("Expected policy namespace to be '%s', was '%s'", namespace, policy.Namespace)
	}
}
Example no. 13
// TestPolicyListRespectingFields tests that a List() call to the ReadOnlyPolicyCache for some namespace,
// filtered with a field selector, will return all policies in that namespace matching that field
func TestPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	name := "uniquePolicyName"
	namespace := "namespaceTwo"
	label := labels.Everything()
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		policies, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 1) &&
			(policies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 1:
		t.Errorf("Expected policyList to have 1 policy, had %d", len(policies.Items))
	case policies.Items[0].Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policies.Items[0].Name)
	}
}
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	<-stopCh
	e.queue.ShutDown()
}
Example no. 15
func (m *Master) setupSecureProxy(user, privateKeyfile, publicKeyfile string) {
	// Sync loop to ensure that the SSH key has been installed.
	go util.Until(func() {
		if m.installSSHKey == nil {
			glog.Error("Won't attempt to install ssh key: installSSHKey function is nil")
			return
		}
		key, err := util.ParsePublicKeyFromFile(publicKeyfile)
		if err != nil {
			glog.Errorf("Failed to load public key: %v", err)
			return
		}
		keyData, err := util.EncodeSSHKey(key)
		if err != nil {
			glog.Errorf("Failed to encode public key: %v", err)
			return
		}
		if err := m.installSSHKey(user, keyData); err != nil {
			glog.Errorf("Failed to install ssh key: %v", err)
		}
	}, 5*time.Minute, util.NeverStop)
	// Sync loop for tunnels
	// TODO: switch this to watch.
	go util.Until(func() {
		if err := m.loadTunnels(user, privateKeyfile); err != nil {
			glog.Errorf("Failed to load SSH Tunnels: %v", err)
		}
		if m.tunnels != nil && m.tunnels.Len() != 0 {
			// Sleep for 9 seconds (plus the 1-second Until period, about 10 seconds total) if we have some tunnels.
			// TODO (cjcullen): tunnels can lag behind actually existing nodes.
			time.Sleep(9 * time.Second)
		}
	}, 1*time.Second, util.NeverStop)
	// Refresh loop for tunnels
	// TODO: could make this more controller-ish
	go util.Until(func() {
		time.Sleep(5 * time.Minute)
		if err := m.refreshTunnels(user, privateKeyfile); err != nil {
			glog.Errorf("Failed to refresh SSH Tunnels: %v", err)
		}
	}, 0*time.Second, util.NeverStop)
}
// Run begins watching and syncing.
func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go rm.rcController.Run(stopCh)
	go rm.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(rm.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down RC Manager")
	rm.queue.ShutDown()
}
Example no. 17
func (cm *containerManagerImpl) Start() error {
	if cm.dockerContainerName != "" {
		go util.Until(func() {
			err := cm.ensureDockerInContainer()
			if err != nil {
				glog.Warningf("[ContainerManager] Failed to ensure Docker is in a container: %v", err)
			}
		}, time.Minute, util.NeverStop)
	}
	return nil
}
Example no. 18
func NewCache(bufferDuration, gcDuration time.Duration) Cache {
	rc := &realCache{
		pods:           make(map[string]*podElement),
		nodes:          make(map[string]*nodeElement),
		events:         store.NewGCStore(store.NewTimeStore(), bufferDuration),
		eventUIDs:      make(map[string]struct{}),
		bufferDuration: bufferDuration,
	}
	go util.Until(rc.runGC, gcDuration, util.NeverStop)
	return rc
}
// Run begins processing items, and will continue until a value is sent down stopCh.
// It's an error to call Run more than once.
// Run blocks; call via go.
func (c *Controller) Run(stopCh <-chan struct{}) {
	defer util.HandleCrash()
	cache.NewReflector(
		c.config.ListerWatcher,
		c.config.ObjectType,
		c.config.Queue,
		c.config.FullResyncPeriod,
	).RunUntil(stopCh)

	util.Until(c.processLoop, time.Second, stopCh)
}
Example no. 20
func newPortRangeAllocator(r util.PortRange) PortAllocator {
	if r.Base == 0 || r.Size == 0 {
		panic("illegal argument: may not specify an empty port range")
	}
	ra := &rangeAllocator{
		PortRange: r,
		ports:     make(chan int, portsBufSize),
		rand:      rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	go util.Until(func() { ra.fillPorts(util.NeverStop) }, nextFreePortCooldown, util.NeverStop)
	return ra
}
func TestUpdatePods(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{Watch: fakeWatch}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	received := make(chan string)

	manager.syncHandler = func(key string) error {
		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		received <- obj.(*api.ReplicationController).Name
		return nil
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	// Put 2 rcs and one pod into the controller's stores
	testControllerSpec1 := newReplicationController(1)
	manager.rcStore.Store.Add(testControllerSpec1)
	testControllerSpec2 := *testControllerSpec1
	testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
	testControllerSpec2.Name = "barfoo"
	manager.rcStore.Store.Add(&testControllerSpec2)

	// Put one pod in the podStore
	pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1).Items[0]
	pod2 := pod1
	pod2.Labels = testControllerSpec2.Spec.Selector

	// Send an update of the same pod with modified labels, and confirm we get a sync request for
	// both controllers
	manager.updatePod(&pod1, &pod2)

	expected := util.NewStringSet(testControllerSpec1.Name, testControllerSpec2.Name)
	for _, name := range expected.List() {
		t.Logf("Expecting update for %+v", name)
		select {
		case got := <-received:
			if !expected.Has(got) {
				t.Errorf("Expected keys %#v got %v", expected, got)
			}
		case <-time.After(controllerTimeout):
			t.Errorf("Expected update notifications for controllers within 100ms each")
		}
	}
}
Example no. 22
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
	defer util.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer util.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()
	<-stopCh
	e.queue.ShutDown()
}
Example no. 23
// runs the main kubelet loop, closing the kubeletFinished chan when the loop exits.
// never returns.
func (kl *kubeletExecutor) Run(updates <-chan kubelet.PodUpdate) {
	defer func() {
		close(kl.kubeletFinished)
		util.HandleCrash()
		log.Infoln("kubelet run terminated") //TODO(jdef) turn down verbosity
		// important: never return! this is in our contract
		select {}
	}()

	// push updates through a closable pipe. when the executor indicates shutdown
	// via Done() we want to stop the Kubelet from processing updates.
	pipe := make(chan kubelet.PodUpdate)
	go func() {
		// closing pipe will cause our patched kubelet's syncLoop() to exit
		defer close(pipe)
	pipeLoop:
		for {
			select {
			case <-kl.executorDone:
				break pipeLoop
			default:
				select {
				case u := <-updates:
					select {
					case pipe <- u: // noop
					case <-kl.executorDone:
						break pipeLoop
					}
				case <-kl.executorDone:
					break pipeLoop
				}
			}
		}
	}()

	// we expect that Run() will complete after the pipe is closed and the
	// kubelet's syncLoop() has finished processing its backlog, which hopefully
	// will not take very long. Peeking into the future (current k8s master) it
	// seems that the backlog has grown from 1 to 50 -- this may negatively impact
	// us going forward, time will tell.
	util.Until(func() { kl.Kubelet.Run(pipe) }, 0, kl.executorDone)

	//TODO(jdef) revisit this if/when executor failover lands
	err := kl.SyncPods([]*api.Pod{}, nil, nil, time.Now())
	if err != nil {
		log.Errorf("failed to cleanly remove all pods and associated state: %v", err)
	}
}
Example no. 24
func doEtcdFailure(failCommand, fixCommand string, repeat bool) {
	By("failing etcd")

	if repeat {
		stop := make(chan struct{}, 1)
		go util.Until(func() {
			defer GinkgoRecover()
			masterExec(failCommand)
		}, 1*time.Second, stop)
		time.Sleep(etcdFailureDuration)
		stop <- struct{}{}
	} else {
		masterExec(failCommand)
		time.Sleep(etcdFailureDuration)
	}
	masterExec(fixCommand)
}
Example no. 25
func testMasterUpgrade(ip, v string, mUp func(v string) error) {
	Logf("Starting async validation")
	httpClient := http.Client{Timeout: 2 * time.Second}
	done := make(chan struct{}, 1)
	// Let's make sure we've finished the heartbeat before shutting things down.
	var wg sync.WaitGroup
	go util.Until(func() {
		defer GinkgoRecover()
		wg.Add(1)
		defer wg.Done()

		if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) {
			r, err := httpClient.Get("http://" + ip)
			if err != nil {
				Logf("Error reaching %s: %v", ip, err)
				return false, nil
			}
			if r.StatusCode < http.StatusOK || r.StatusCode >= http.StatusNotFound {
				Logf("Bad response; status: %d, response: %v", r.StatusCode, r)
				return false, nil
			}
			return true, nil
		}); err != nil {
			// We log the error here because the test will fail at the very end
			// because this validation runs in another goroutine. Without this,
			// a failure is very confusing to track down because from the logs
			// everything looks fine.
			msg := fmt.Sprintf("Failed to contact service during master upgrade: %v", err)
			Logf(msg)
			Failf(msg)
		}
	}, 200*time.Millisecond, done)

	Logf("Starting master upgrade")
	expectNoError(mUp(v))
	done <- struct{}{}
	Logf("Stopping async validation")
	wg.Wait()

	// TODO(mbforbes): Validate that:
	// - the master software version truly changed

	Logf("Master upgrade complete")
}
func TestWatchPods(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{Watch: fakeWatch}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	// Put one rc and one pod into the controller's stores
	testControllerSpec := newReplicationController(1)
	manager.rcStore.Store.Add(testControllerSpec)
	received := make(chan string)
	// The pod update sent through the fakeWatcher should figure out the managing rc and
	// send it into the syncHandler.
	manager.syncHandler = func(key string) error {

		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the pod watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method for the right rc.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.podController.Run(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	pods := newPodList(nil, 1, api.PodRunning, testControllerSpec)
	testPod := pods.Items[0]
	testPod.Status.Phase = api.PodFailed
	fakeWatch.Add(&testPod)

	select {
	case <-received:
	case <-time.After(controllerTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}
Example no. 27
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	sources, err := newSources()
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, *argUseModel, *argModelResolution, *argAlignStats)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}
	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
Example no. 28
// TestPolicyListRespectingLabels tests that a List() call to the ReadOnlyPolicyCache for some namespace,
// filtered with a label selector, will return all policies in that namespace matching that label
func TestPolicyListRespectingLabels(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	desiredName := "nonUniquePolicyName"
	namespace := "namespaceTwo"
	key := "labelToMatchOn"
	operator := labels.EqualsOperator
	val := util.NewStringSet("someValue")
	requirement, err := labels.NewRequirement(key, operator, val)
	if err != nil {
		t.Errorf("labels.Selector misconstructed: %v", err)
	}

	label := labels.LabelSelector{*requirement}
	field := fields.Everything()

	util.Until(func() {
		policies, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 1) &&
			(policies.Items[0].Name == desiredName) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 1:
		t.Errorf("Expected policyList to have 1 policy, had %d", len(policies.Items))
	case policies.Items[0].Name != desiredName:
		t.Errorf("Expected policy name to be '%s', was '%s'", desiredName, policies.Items[0].Name)
	}
}
Example no. 29
func doWork() ([]api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	sources, err := newSources()
	if err != nil {
		return nil, nil, nil, err
	}
	externalSinks, err := newSinks()
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(externalSinks)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration)
	if err != nil {
		return nil, nil, nil, err
	}
	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
func TestWatchControllers(t *testing.T) {
	fakeWatch := watch.NewFake()
	client := &testclient.Fake{Watch: fakeWatch}
	manager := NewReplicationManager(client, BurstReplicas)
	manager.podStoreSynced = alwaysReady

	var testControllerSpec api.ReplicationController
	received := make(chan string)

	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler. The handler validates the received controller
	// and closes the received channel to indicate that the test can finish.
	manager.syncHandler = func(key string) error {

		obj, exists, err := manager.rcStore.Store.GetByKey(key)
		if !exists || err != nil {
			t.Errorf("Expected to find controller under key %v", key)
		}
		controllerSpec := *obj.(*api.ReplicationController)
		if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
			t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
		}
		close(received)
		return nil
	}
	// Start only the rc watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go manager.rcController.Run(stopCh)
	go util.Until(manager.worker, 10*time.Millisecond, stopCh)

	testControllerSpec.Name = "foo"
	fakeWatch.Add(&testControllerSpec)

	select {
	case <-received:
	case <-time.After(controllerTimeout):
		t.Errorf("Expected 1 call but got 0")
	}
}