Example #1
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil, c, *argSinkFrequency)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}

	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This allows the model to be activated/deactivated at runtime.
	modelDuration := 2 * *argModelResolution
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}
	go util.Until(manager.HousekeepModel, modelDuration, util.NeverStop)

	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
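All of the examples on this page share one contract: util.Until(f, period, stopCh) invokes f, waits for period, and repeats until stopCh is closed, and util.NeverStop is a channel that is never closed, so those loops run for the life of the process. The self-contained sketch below approximates that behavior with the standard library only; pollUntil and the housekeeping closure are illustrative names, not part of the Kubernetes util package.

package main

import (
	"fmt"
	"time"
)

// pollUntil approximates util.Until: check the stop channel, call f,
// wait for period, and repeat until stopCh is closed.
func pollUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	go pollUntil(func() { fmt.Println("housekeeping tick") }, 100*time.Millisecond, stopCh)

	time.Sleep(350 * time.Millisecond)
	close(stopCh) // doWork never does this: it passes util.NeverStop, so its loops run forever
}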
Example #2
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil, c, *argSinkFrequency)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}

	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This allows the model to be activated/deactivated at runtime.
	// Set the housekeeping period to 2 * argModelResolution + 25 sec
	// TODO(afein): select a more well-defined housekeeping interval
	modelDuration := 2 * *argModelResolution
	modelDuration += 25 * time.Second
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}
	go util.Until(manager.HousekeepModel, modelDuration, util.NeverStop)

	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
Example #3
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run(period time.Duration) {
	// Incorporate the results of node status pushed from kubelet to master.
	go util.Until(func() {
		if err := nc.monitorNodeStatus(); err != nil {
			glog.Errorf("Error monitoring node status: %v", err)
		}
	}, nc.nodeMonitorPeriod, util.NeverStop)

	// Managing eviction of nodes:
	// 1. when we delete pods off a node, if the node was not empty at the time we then
	//    queue a termination watcher
	//    a. If we hit an error, retry deletion
	// 2. The terminator loop ensures that pods are eventually cleaned and we never
	//    terminate a pod in a time period less than nc.maximumGracePeriod. AddedAt
	//    is the time from which we measure "has this pod been terminating too long",
	//    after which we will delete the pod with grace period 0 (force delete).
	//    a. If we hit errors, retry instantly
	//    b. If there are no pods left terminating, exit
	//    c. If there are pods still terminating, wait for their estimated completion
	//       before retrying
	go util.Until(func() {
		nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			remaining, err := nc.deletePods(value.Value)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
				return false, 0
			}
			if remaining {
				nc.terminationEvictor.Add(value.Value)
			}
			return true, 0
		})
	}, nodeEvictionPeriod, util.NeverStop)

	// TODO: replace with a controller that ensures pods that are terminating complete
	// in a particular time period
	go util.Until(func() {
		nc.terminationEvictor.Try(func(value TimedValue) (bool, time.Duration) {
			completed, remaining, err := nc.terminatePods(value.Value, value.AddedAt)
			if err != nil {
				util.HandleError(fmt.Errorf("unable to terminate pods on node %q: %v", value.Value, err))
				return false, 0
			}

			if completed {
				glog.Infof("All pods terminated on %s", value.Value)
				nc.recordNodeEvent(value.Value, "TerminatedAllPods", fmt.Sprintf("Terminated all Pods on Node %s.", value.Value))
				return true, 0
			}

			glog.V(2).Infof("Pods terminating since %s on %q, estimated completion %s", value.AddedAt, value.Value, remaining)
			// clamp very short intervals
			if remaining < nodeEvictionPeriod {
				remaining = nodeEvictionPeriod
			}
			return false, remaining
		})
	}, nodeEvictionPeriod, util.NeverStop)
}
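The two eviction loops above drain rate-limited queues whose work function reports a (done, retryAfter) pair: false with a zero delay means retry promptly, while a non-zero delay (clamped to at least nodeEvictionPeriod) postpones the retry. The sketch below is a self-contained approximation of that contract under hypothetical names (tryEach, process); it is not the real TimedValue/Try API.

package main

import (
	"fmt"
	"time"
)

// tryEach is a hypothetical stand-in for the evictor's Try: it keeps handing
// each item to process until process reports done; a false result with a delay
// postpones the retry, mirroring the clamped "estimated completion" logic above.
func tryEach(items []string, process func(string) (bool, time.Duration)) {
	for _, it := range items {
		for {
			done, retryAfter := process(it)
			if done {
				break
			}
			if retryAfter < 10*time.Millisecond { // clamp very short intervals
				retryAfter = 10 * time.Millisecond
			}
			time.Sleep(retryAfter)
		}
	}
}

func main() {
	attempts := map[string]int{}
	tryEach([]string{"node-a", "node-b"}, func(node string) (bool, time.Duration) {
		attempts[node]++
		if attempts[node] < 3 {
			return false, 5 * time.Millisecond // not finished yet; retry after a short delay
		}
		fmt.Println("all pods terminated on", node)
		return true, 0
	})
}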
Example #4
// Run begins the quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rq.rqController.Run(stopCh)
	go rq.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go util.Until(rq.worker, time.Second, stopCh)
	}
	go util.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
	glog.Infof("Shutting down ResourceQuotaController")
	rq.queue.ShutDown()
}
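Example #4 shows a common controller shape: a fixed pool of workers each wrapped in util.Until with a one-second period, a slower full resync, and a single stop channel whose close tears everything down before the queue is shut. The standard-library sketch below approximates that fan-out and tear-down; the worker, resync, and buffered channel are illustrative, not the ResourceQuotaController internals.

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	stopCh := make(chan struct{})
	work := make(chan string, 16)
	var wg sync.WaitGroup

	// Worker pool: each goroutine drains the queue until stopCh is closed.
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				select {
				case <-stopCh:
					return
				case item := <-work:
					fmt.Printf("worker %d handled %s\n", id, item)
				}
			}
		}(i)
	}

	// Periodic resync: re-enqueue work on a slower cadence.
	go func() {
		ticker := time.NewTicker(50 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-stopCh:
				return
			case <-ticker.C:
				work <- "resync"
			}
		}
	}()

	time.Sleep(200 * time.Millisecond)
	close(stopCh) // one close tears down every loop, mirroring <-stopCh in Run
	wg.Wait()
}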
Example #5
// Create a new Cacher responsible for servicing WATCH and LIST requests from its
// internal cache and updating its cache in the background based on the given
// configuration.
func NewCacher(config CacherConfig) *Cacher {
	watchCache := cache.NewWatchCache(config.CacheCapacity)
	listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)

	cacher := &Cacher{
		initialized: sync.WaitGroup{},
		watchCache:  watchCache,
		reflector:   cache.NewReflector(listerWatcher, config.Type, watchCache, 0),
		watcherIdx:  0,
		watchers:    make(map[int]*cacheWatcher),
		versioner:   config.Versioner,
		keyFunc:     config.KeyFunc,
	}
	cacher.initialized.Add(1)
	// See the startCaching method for an explanation of why this is done.
	watchCache.SetOnReplace(func() {
		cacher.initOnce.Do(func() { cacher.initialized.Done() })
		cacher.Unlock()
	})
	watchCache.SetOnEvent(cacher.processEvent)

	stopCh := config.StopChannel
	go util.Until(func() { cacher.startCaching(stopCh) }, 0, stopCh)
	cacher.initialized.Wait()
	return cacher
}
Example #6
// Run runs the specified ProxyServer.  This should never exit (unless CleanupAndExit is set).
func (s *ProxyServer) Run(_ []string) error {
	// remove iptables rules and exit
	if s.Config.CleanupAndExit {
		encounteredError := userspace.CleanupLeftovers(s.IptInterface)
		encounteredError = iptables.CleanupLeftovers(s.IptInterface) || encounteredError
		if encounteredError {
			return errors.New("Encountered an error while tearing down rules.")
		}
		return nil
	}

	s.Broadcaster.StartRecordingToSink(s.Client.Events(""))

	// Birth Cry after the birth is successful
	s.birthCry()

	// Start up Healthz service if requested
	if s.Config.HealthzPort > 0 {
		go util.Until(func() {
			err := http.ListenAndServe(s.Config.HealthzBindAddress.String()+":"+strconv.Itoa(s.Config.HealthzPort), nil)
			if err != nil {
				glog.Errorf("Starting health server failed: %v", err)
			}
		}, 5*time.Second, util.NeverStop)
	}

	// Just loop forever for now...
	s.Proxier.SyncLoop()
	return nil
}
Example #7
func (d *DeploymentController) Run(syncPeriod time.Duration) {
	go util.Until(func() {
		if err := d.reconcileDeployments(); err != nil {
			glog.Errorf("Couldnt reconcile deployments: %v", err)
		}
	}, syncPeriod, util.NeverStop)
}
Example #8
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
	util.Until(func() {
		if err := c.UpdateKubernetesService(); err != nil {
			util.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
Example #9
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	util.Until(func() {
		if err := c.RunOnce(); err != nil {
			util.HandleError(err)
		}
	}, c.interval, ch)
}
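Examples #8 and #9 call util.Until without the go keyword, so the periodic loop deliberately occupies the caller's goroutine and returns only when the supplied channel is closed. Below is a self-contained sketch of that blocking shape and of how a caller typically drives it; runUntil, the pass function, and the timings are illustrative, not the Repair API.

package main

import (
	"fmt"
	"time"
)

// runUntil blocks the calling goroutine, invoking pass once per interval
// until ch is closed, roughly mirroring the blocking util.Until call above.
func runUntil(pass func() error, interval time.Duration, ch <-chan struct{}) {
	for {
		if err := pass(); err != nil {
			fmt.Println("pass failed:", err) // the real code routes this through util.HandleError
		}
		select {
		case <-ch:
			return
		case <-time.After(interval):
		}
	}
}

func main() {
	ch := make(chan struct{})
	go func() {
		time.Sleep(300 * time.Millisecond)
		close(ch) // the caller, not the loop, decides when the repair loop stops
	}()
	runUntil(func() error { fmt.Println("repair pass"); return nil }, 100*time.Millisecond, ch)
	fmt.Println("RunUntil returned")
}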
Example #10
// TestClusterPolicyBindingListRespectingFields tests that a List() call, filtered with a field to the ReadOnlyClusterPolicyBindingCache
// will return all clusterPolicyBindings matching that field
func TestClusterPolicyBindingListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicybindingcache()
	defer close(cacheChannel)

	var clusterPolicyBindings *authorizationapi.ClusterPolicyBindingList
	var err error

	name := "uniqueClusterPolicyBindingName"
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		clusterPolicyBindings, err = testCache.List(&unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{Selector: field}})

		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 1) &&
			(clusterPolicyBindings.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyBinding with fieldSelector using ReadOnlyClusterBindingCache: %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingList using fieldSelector is nil")
	case len(clusterPolicyBindings.Items) != 1:
		t.Errorf("Expected clusterPolicyBindingList using fieldSelector to contain 1 items, had %d", len(clusterPolicyBindings.Items))
	case clusterPolicyBindings.Items[0].Name != name:
		t.Errorf("Expected clusterPolicyBinding to have name '%s', had '%s'", name, clusterPolicyBindings.Items[0].Name)
	}
}
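This test, like the cache tests that follow, relies on a single idiom: util.Until polls every millisecond, and the polled closure closes testChannel once the expected state appears, which simultaneously stops the polling loop. The sketch below reproduces that poll-until-condition pattern in isolation; pollUntil, ready, and done are illustrative names rather than the helpers used by these tests.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// pollUntil approximates util.Until: check the stop channel, call f,
// wait for period, and repeat until stopCh is closed.
func pollUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	var ready atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond)
		ready.Store(true) // stands in for the cache finishing its initial fill
	}()

	done := make(chan struct{})
	pollUntil(func() {
		if ready.Load() {
			close(done) // the success signal is also the stop signal
		}
	}, time.Millisecond, done)
	fmt.Println("condition observed; the test would now run its assertions")
}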
Example #11
// TestPolicyList tests that a List() call for a namespace to the ReadOnlyPolicyCache will return all policies in that namespace
func TestPolicyList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	namespace := "namespaceTwo"

	util.Until(func() {
		policies, err = testCache.List(nil, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 2:
		t.Errorf("Expected policyList to have 2 policies, had %d", len(policies.Items))
	}
}
Example #12
// TestPolicyGet tests that a Get() call to the ReadOnlyPolicyCache will retrieve the correct policy
func TestPolicyGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policy *authorizationapi.Policy
	var err error

	namespace := "namespaceTwo"
	name := "uniquePolicyName"

	util.Until(func() {
		policy, err = testCache.Get(name, namespace)

		if (err == nil) &&
			(policy != nil) &&
			(policy.Name == name) &&
			(policy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policy using ReadOnlyPolicyCache: %v", err)
	case policy == nil:
		t.Error("Policy is nil")
	case policy.Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policy.Name)
	case policy.Namespace != namespace:
		t.Errorf("Expected policy namespace to be '%s', was '%s'", namespace, policy.Namespace)
	}
}
Example #13
// TestPolicyListRespectingFields tests that a List() call for some namespace, filtered with a field to the ReadOnlyPolicyCache
// will return all policies in that namespace matching that field
func TestPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	name := "uniquePolicyName"
	namespace := "namespaceTwo"
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		policies, err = testCache.List(&kapi.ListOptions{FieldSelector: field}, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 1) &&
			(policies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 1:
		t.Errorf("Expected policyList to have 1 policy, had %d", len(policies.Items))
	case policies.Items[0].Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policies.Items[0].Name)
	}
}
Example #14
// TestListClusterPolicyBindings tests that a ReadOnlyPolicyClient ListPolicyBindings() call correctly lists cluster policy bindings
// when the namespace given is the empty string
func TestListClusterPolicyBindings(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicyBindings *authorizationapi.PolicyBindingList
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)

	util.Until(func() {
		clusterPolicyBindings, err = testClient.ListPolicyBindings(context, nil)

		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 2) &&
			(strings.Contains(clusterPolicyBindings.Items[0].Name, "Cluster")) &&
			(strings.Contains(clusterPolicyBindings.Items[1].Name, "Cluster")) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy binding using ListPolicyBindings(): %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingsList is nil")
	case len(clusterPolicyBindings.Items) != 2:
		t.Errorf("ClusterPolicyBindingsList contains %d items, should contain 2.", len(clusterPolicyBindings.Items))
	case !strings.Contains(clusterPolicyBindings.Items[0].Name, "Cluster") || !strings.Contains(clusterPolicyBindings.Items[1].Name, "Cluster"):
		t.Error("ClusterPolicyBinding name should contain \"Cluster\", but did not.")
	}
}
Example #15
// TestPolicyListNamespaceAll tests that a List() call for kapi.NamespaceAll to the ReadOnlyPolicyCache will return
// all policies in all namespaces
func TestPolicyListNamespaceAll(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	namespace := kapi.NamespaceAll
	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		policies, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 3) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PoliciesList is nil")
	case len(policies.Items) != 3:
		t.Errorf("Expected policyList to have 3 policies, had %d", len(policies.Items))
	}
}
Example #16
// TestPolicyBindingGetRespectingNamespaces tests that a Get() call to the ReadOnlyPolicyBindingCache will retrieve the correct policy binding
// when the name alone is not a unique identifier but the {name, namespace} pair is
func TestPolicyBindingGetRespectingNamespaces(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBinding *authorizationapi.PolicyBinding
	var err error

	namespace := "namespaceOne"
	name := "nonUniquePolicyBindingName"

	util.Until(func() {
		policyBinding, err = testCache.Get(name, namespace)

		if (err == nil) &&
			(policyBinding != nil) &&
			(policyBinding.Name == name) &&
			(policyBinding.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBinding using ReadOnlyPolicyBindingCache: %v", err)
	case policyBinding == nil:
		t.Error("PolicyBinding is nil.")
	case policyBinding.Name != name:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", name, policyBinding.Name)
	case policyBinding.Namespace != namespace:
		t.Errorf("Expected policyBinding namespace to be '%s', was '%s'", namespace, policyBinding.Namespace)
	}
}
Example #17
// TestPolicyBindingList tests that a List() call for a namespace to the ReadOnlyPolicyBindingCache will return all policyBindings in that namespace
func TestPolicyBindingList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	namespace := "namespaceTwo"
	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		policyBindings, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 2:
		t.Errorf("Expected policyBindingList to have 2 items, had %d", len(policyBindings.Items))
	}
}
Example #18
// TestClusterPolicyBindingList tests that a List() call to the ReadOnlyClusterPolicyBindingCache will return all clusterPolicyBindings
func TestClusterPolicyBindingList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicybindingcache()
	defer close(cacheChannel)

	var clusterPolicyBindings *authorizationapi.ClusterPolicyBindingList
	var err error

	util.Until(func() {
		clusterPolicyBindings, err = testCache.List(nil)

		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyBindingList using ReadOnlyClusterBindingCache: %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingList is nil")
	case len(clusterPolicyBindings.Items) != 2:
		t.Errorf("Expected clusterPolicyBindingList to contain 2 items, had %d", len(clusterPolicyBindings.Items))
	}
}
Example #19
func startKubelet(k KubeletBootstrap, podCfg *config.PodConfig, kc *KubeletConfig) {
	// start the kubelet
	go util.Until(func() { k.Run(podCfg.Updates()) }, 0, util.NeverStop)

	// start the kubelet server
	if kc.EnableServer {
		go util.Until(func() {
			k.ListenAndServe(kc.Address, kc.Port, kc.TLSOptions, kc.Auth, kc.EnableDebuggingHandlers)
		}, 0, util.NeverStop)
	}
	if kc.ReadOnlyPort > 0 {
		go util.Until(func() {
			k.ListenAndServeReadOnly(kc.Address, kc.ReadOnlyPort)
		}, 0, util.NeverStop)
	}
}
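Example #19 passes a period of 0, which effectively turns the helper into a restart-forever supervisor: as soon as k.Run or ListenAndServe returns, it is started again, and only closing the stop channel ends the loop (never, here, since util.NeverStop is used). The self-contained sketch below illustrates that idiom; restartForever and serve are illustrative names.

package main

import (
	"errors"
	"fmt"
	"time"
)

// restartForever approximates util.Until with a zero period: re-run f
// immediately whenever it returns, until stopCh is closed.
func restartForever(f func(), stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f() // if the server returns (e.g. ListenAndServe fails), start it again
	}
}

func main() {
	serve := func() {
		// Stands in for ListenAndServe returning with an error.
		fmt.Println("server exited:", errors.New("simulated listen failure"))
		time.Sleep(20 * time.Millisecond) // keep the sketch's output readable
	}

	stopCh := make(chan struct{})
	go restartForever(serve, stopCh)

	time.Sleep(100 * time.Millisecond)
	close(stopCh) // the kubelet never does this; it passes util.NeverStop
}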
Example #20
func (a *HorizontalController) Run(syncPeriod time.Duration) {
	go util.Until(func() {
		if err := a.reconcileAutoscalers(); err != nil {
			glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err)
		}
	}, syncPeriod, util.NeverStop)
}
Example #21
// TestPolicyBindingListRespectingFields tests that a List() call for some namespace, filtered with a field to the ReadOnlyPolicyBindingCache
// will return all policyBindings in that namespace matching that field
func TestPolicyBindingListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	name := "uniquePolicyBindingName"
	namespace := "namespaceTwo"
	label := labels.Everything()
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		policyBindings, err = testCache.List(label, field, namespace)

		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 1) &&
			(policyBindings.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 1:
		t.Errorf("Expected policyBindingList to have 1 item, had %d", len(policyBindings.Items))
	case policyBindings.Items[0].Name != name:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", name, policyBindings.Items[0].Name)
	}
}
Example #22
// TestClusterPolicyGet tests that a Get() call to the ReadOnlyClusterPolicyCache will retrieve the correct clusterPolicy
func TestClusterPolicyGet(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicy *authorizationapi.ClusterPolicy
	var err error

	name := "uniqueClusterPolicyName"

	util.Until(func() {
		clusterPolicy, err = testCache.Get(name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicy using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicy == nil:
		t.Error("ClusterPolicy is nil.")
	case clusterPolicy.Name != name:
		t.Errorf("Expected clusterPolicy name to be '%s', was '%s'", name, clusterPolicy.Name)
	}
}
Example #23
// TestClusterPolicyList tests that a List() call to the ReadOnlyClusterPolicyCache will return all clusterPolicies
func TestClusterPolicyList(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	label := labels.Everything()
	field := fields.Everything()

	util.Until(func() {
		clusterPolicies, err = testCache.List(label, field)

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 2) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 2:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	}
}
Example #24
func main() {
	clientConfig := kubectl_util.DefaultClientConfig(flags)
	flags.Parse(os.Args)
	cfg := parseCfg(*config, *lbDefAlgorithm)

	var kubeClient *unversioned.Client
	var err error

	defErrorPage := newStaticPageHandler(*errorPage, defaultErrorPage)
	if defErrorPage == nil {
		glog.Fatalf("Failed to load the default error page")
	}

	go registerHandlers(defErrorPage)

	var tcpSvcs map[string]int
	if *tcpServices != "" {
		tcpSvcs = parseTCPServices(*tcpServices)
	} else {
		glog.Infof("No tcp/https services specified")
	}

	if *startSyslog {
		cfg.startSyslog = true
		_, err = newSyslogServer("/var/run/haproxy.log.socket")
		if err != nil {
			glog.Fatalf("Failed to start syslog server: %v", err)
		}
	}

	if *cluster {
		if kubeClient, err = unversioned.NewInCluster(); err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	} else {
		config, err := clientConfig.ClientConfig()
		if err != nil {
			glog.Fatalf("error connecting to the client: %v", err)
		}
		kubeClient, err = unversioned.New(config)
		if err != nil {
			glog.Fatalf("Failed to create client: %v", err)
		}
	}
	namespace, specified, err := clientConfig.Namespace()
	if err != nil {
		glog.Fatalf("unexpected error: %v", err)
	}
	if !specified {
		namespace = api.NamespaceAll
	}

	// TODO: Handle multiple namespaces
	lbc := newLoadBalancerController(cfg, kubeClient, namespace, tcpSvcs)
	go lbc.epController.Run(util.NeverStop)
	go lbc.svcController.Run(util.NeverStop)
	if *dry {
		dryRun(lbc)
	} else {
		lbc.cfg.reload()
		util.Until(lbc.worker, time.Second, util.NeverStop)
	}
}
Example #25
// TestClusterPolicyListRespectingFields tests that a List() call, filtered with a field to the ReadOnlyClusterPolicyCache
// will return all clusterPolicies matching that field
func TestClusterPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	name := "uniqueClusterPolicyName"
	label := labels.Everything()
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		clusterPolicies, err = testCache.List(label, field)

		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 1) &&
			(clusterPolicies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList with fieldSelector using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 1:
		t.Errorf("Expected clusterPolicyList to contain 2 clusterPolicies, contained %d", len(clusterPolicies.Items))
	case clusterPolicies.Items[0].Name != name:
		t.Errorf("Expected field-selected clusterPolicy name to be '%s', was '%s'", name, clusterPolicies.Items[0].Name)
	}
}
Example #26
func (nm *realNodeManager) Start() {
	if nm.client == nil {
		return
	}
	nm.setNodeLister()
	go util.Until(nm.syncNodeStatus, nm.nodeStatusUpdateFrequency, util.NeverStop)
}
Example #27
func (rc *RouteController) Run(syncPeriod time.Duration) {
	go util.Until(func() {
		if err := rc.reconcileNodeRoutes(); err != nil {
			glog.Errorf("Couldn't reconcile node routes: %v", err)
		}
	}, syncPeriod, util.NeverStop)
}
Example #28
// TestGetClusterPolicy tests that a ReadOnlyPolicyClient GetPolicy() call correctly retrieves a cluster policy
// when the namespace given is equal to the empty string
func TestGetClusterPolicy(t *testing.T) {
	testClient, policyStopChannel, bindingStopChannel, testChannel := beforeTestingSetup_readonlycache()
	defer close(policyStopChannel)
	defer close(bindingStopChannel)

	var clusterPolicy *authorizationapi.Policy
	var err error

	namespace := ""
	context := kapi.WithNamespace(kapi.NewContext(), namespace)
	name := "uniqueClusterPolicyName"

	util.Until(func() {
		clusterPolicy, err = testClient.GetPolicy(context, name)

		if (err == nil) &&
			(clusterPolicy != nil) &&
			(clusterPolicy.Name == name) &&
			(clusterPolicy.Namespace == namespace) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting cluster policy using GetPolicy(): %v", err)
	case clusterPolicy == nil:
		t.Error("Policy is nil")
	case clusterPolicy.Name != name:
		t.Errorf("Expected policy.Name to be '%s', but got '%s'", name, clusterPolicy.Name)
	case clusterPolicy.Namespace != "":
		t.Errorf("Expected policy.Namespace to be '%s', but got '%s'", namespace, clusterPolicy.Namespace)
	}
}
Example #29
func (cm *containerManagerImpl) Start() error {
	// Setup the node
	if err := cm.setupNode(); err != nil {
		return err
	}
	// Don't start a background goroutine if there are no ensureStateFuncs.
	numEnsureStateFuncs := 0
	for _, cont := range cm.systemContainers {
		if cont.ensureStateFunc != nil {
			numEnsureStateFuncs++
		}
	}
	if numEnsureStateFuncs == 0 {
		return nil
	}

	// Run ensure state functions every minute.
	go util.Until(func() {
		for _, cont := range cm.systemContainers {
			if cont.ensureStateFunc != nil {
				if err := cont.ensureStateFunc(cont.manager); err != nil {
					glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
				}
			}
		}
	}, time.Minute, util.NeverStop)

	return nil
}
Example #30
// DiscoverTopology receives a discovery request from the server and starts probing the Kubernetes cluster.
func (handler *KubernetesServerMessageHandler) DiscoverTopology(serverMsg *comm.MediationServerMessage) {
	// Discover the Kubernetes topology.
	glog.V(3).Infof("Discover topology request from server.")

	// 1. Get message ID
	messageID := serverMsg.GetMessageID()
	stopCh := make(chan struct{})
	go util.Until(func() { handler.keepDiscoverAlive(messageID) }, time.Second*10, stopCh)
	defer close(stopCh)

	// 2. Build discoverResponse
	// must have kubeClient to do ParseNode and ParsePod
	if handler.kubeClient == nil {
		glog.V(3).Infof("kubenetes client is nil, error")
		return
	}

	kubeProbe := probe.NewKubeProbe(handler.kubeClient)

	nodeEntityDtos, err := kubeProbe.ParseNode()
	if err != nil {
		// TODO: should we still send a message to the server here?
		glog.Errorf("Error parsing nodes: %s. Will return.", err)
		return
	}

	podEntityDtos, err := kubeProbe.ParsePod(api.NamespaceAll)
	if err != nil {
		// TODO: should we still send a message to the server here, or set an errorDTO?
		glog.Errorf("Error parsing pods: %s. Will return.", err)
		return
	}

	appEntityDtos, err := kubeProbe.ParseApplication(api.NamespaceAll)
	if err != nil {
		glog.Errorf("Error parsing applications: %s. Will return.", err)
		return
	}

	serviceEntityDtos, err := kubeProbe.ParseService(api.NamespaceAll, labels.Everything())
	if err != nil {
		// TODO: should we still send a message to the server here, or set an errorDTO?
		glog.Errorf("Error parsing services: %s. Will return.", err)
		return
	}

	entityDtos := nodeEntityDtos
	entityDtos = append(entityDtos, podEntityDtos...)
	entityDtos = append(entityDtos, appEntityDtos...)
	entityDtos = append(entityDtos, serviceEntityDtos...)
	discoveryResponse := &comm.DiscoveryResponse{
		EntityDTO: entityDtos,
	}

	// 3. Build Client message
	clientMsg := comm.NewClientMessageBuilder(messageID).SetDiscoveryResponse(discoveryResponse).Create()

	handler.wsComm.SendClientMessage(clientMsg)
}
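Example #30 pairs the periodic loop with a deferred close: the keep-alive goroutine started at the top of DiscoverTopology lives exactly as long as the discovery call, because defer close(stopCh) runs on every return path, including the early error returns. The sketch below is a self-contained approximation of that keep-alive shape; every, discover, and the timings are illustrative.

package main

import (
	"fmt"
	"time"
)

// every approximates util.Until: call f, wait for period, repeat until stopCh closes.
func every(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func discover() {
	stopCh := make(chan struct{})
	go every(func() { fmt.Println("keep-alive sent") }, 20*time.Millisecond, stopCh)
	defer close(stopCh) // stops the heartbeat when discovery returns, on any path

	time.Sleep(70 * time.Millisecond) // stands in for probing nodes, pods, apps, and services
	fmt.Println("discovery response ready")
}

func main() {
	discover()
	time.Sleep(30 * time.Millisecond) // give the heartbeat goroutine time to observe the close
}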