Example #1
func (gc *GarbageCollector) Run(workers int, stopCh <-chan struct{}) {
	glog.Infof("Garbage Collector: Initializing")
	for _, monitor := range gc.monitors {
		go monitor.controller.Run(stopCh)
	}

	wait.PollInfinite(10*time.Second, func() (bool, error) {
		for _, monitor := range gc.monitors {
			if !monitor.controller.HasSynced() {
				glog.Infof("Garbage Collector: Waiting for resource monitors to be synced...")
				return false, nil
			}
		}
		return true, nil
	})
	glog.Infof("Garbage Collector: All monitored resources synced. Proceeding to collect garbage")

	// a single worker that processes graph-change events from the propagator's event queue
	go wait.Until(gc.propagator.processEvent, 0, stopCh)

	for i := 0; i < workers; i++ {
		go wait.Until(gc.worker, 0, stopCh)
		go wait.Until(gc.orphanFinalizer, 0, stopCh)
	}
	Register()
	<-stopCh
	glog.Infof("Garbage Collector: Shutting down")
	gc.dirtyQueue.ShutDown()
	gc.orphanQueue.ShutDown()
	gc.propagator.eventQueue.ShutDown()
}
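Every snippet in this collection leans on wait.Until(f, period, stopCh): call f, sleep for period, repeat, and stop once stopCh is closed, so a period of 0 (as in the worker loops above) simply restarts f as soon as it returns. A minimal, self-contained sketch of that contract, not the upstream implementation, assuming only the standard library:

package main

import (
	"fmt"
	"time"
)

// until is a simplified stand-in for wait.Until: run f, wait period, repeat,
// and return as soon as stopCh is closed. The real helper lives in the
// Kubernetes wait package; this sketch only illustrates the contract.
func until(f func(), period time.Duration, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
		}
		f()
		select {
		case <-stopCh:
			return
		case <-time.After(period):
		}
	}
}

func main() {
	stopCh := make(chan struct{})
	go until(func() { fmt.Println("tick") }, 500*time.Millisecond, stopCh)
	time.Sleep(2 * time.Second)
	close(stopCh) // every loop built on until/wait.Until exits once this channel closes
	time.Sleep(100 * time.Millisecond)
}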
Example #2
func (im *realImageGCManager) Start() {
	go wait.Until(func() {
		// For the initial detection, leave ts as the zero time so detected times are treated as "unknown" (in the past).
		var ts time.Time
		if im.initialized {
			ts = time.Now()
		}
		err := im.detectImages(ts)
		if err != nil {
			glog.Warningf("[imageGCManager] Failed to monitor images: %v", err)
		} else {
			im.initialized = true
		}
	}, 5*time.Minute, wait.NeverStop)

	// Start a goroutine that periodically updates the image cache.
	// TODO(random-liu): Merge this with the previous loop.
	go wait.Until(func() {
		images, err := im.runtime.ListImages()
		if err != nil {
			glog.Warningf("[imageGCManager] Failed to update image list: %v", err)
		} else {
			im.imageCache.set(images)
		}
	}, 30*time.Second, wait.NeverStop)

}
// Run runs the TokensController; it blocks until stopCh is closed.
func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Start controllers (to fill stores, call informers, fill work queues)
	go e.serviceAccountController.Run(stopCh)
	go e.secretController.Run(stopCh)

	// Wait for stores to fill
	for !e.serviceAccountController.HasSynced() || !e.secretController.HasSynced() {
		time.Sleep(100 * time.Millisecond)
	}

	// Spawn workers to process work queues
	for i := 0; i < workers; i++ {
		go wait.Until(e.syncServiceAccount, 0, stopCh)
		go wait.Until(e.syncSecret, 0, stopCh)
	}

	// Block until stop channel is closed
	<-stopCh

	// Shut down queues
	e.syncServiceAccountQueue.ShutDown()
	e.syncSecretQueue.ShutDown()
}
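The sync functions handed to wait.Until here (syncServiceAccount, syncSecret) are not shown, but the surrounding shape, N goroutines draining a work queue until ShutDown() unblocks them, is the usual client-go workqueue loop. A hypothetical worker of that shape (the names and the import path are assumptions for illustration, not the TokensController code):

package main

import (
	"fmt"

	// Assumed import path; older Kubernetes trees vendored an equivalent
	// package under k8s.io/kubernetes/pkg/util/workqueue.
	"k8s.io/client-go/util/workqueue"
)

// worker drains one key at a time until ShutDown() is called on the queue,
// at which point Get reports shutdown=true and the worker returns so that
// the loop driving it (e.g. wait.Until) can exit with it.
func worker(queue workqueue.Interface) {
	for {
		key, shutdown := queue.Get()
		if shutdown {
			return
		}
		fmt.Printf("syncing %v\n", key) // a real controller would call its sync handler here
		queue.Done(key)                 // mark the key as processed so it can be queued again later
	}
}

func main() {
	queue := workqueue.New()
	queue.Add("default/my-secret")
	queue.ShutDown() // already-queued items are still handed out before Get reports shutdown
	worker(queue)    // processes "default/my-secret", then returns
}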
// Run starts a background goroutine that watches for changes to services that
// have (or had) LoadBalancers=true and ensures that they have
// load balancers created and deleted appropriately.
// serviceSyncPeriod controls how often we check the cluster's services to
// ensure that the correct load balancers exist.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if load balancers need to be updated to point to a new set of nodes.
//
// It's an error to call Run() more than once for a given ServiceController
// object.
func (s *ServiceController) Run(workers int) {
	defer runtime.HandleCrash()
	go s.serviceController.Run(wait.NeverStop)
	for i := 0; i < workers; i++ {
		go wait.Until(s.worker, time.Second, wait.NeverStop)
	}
	nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run()
	go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop)
}
Example #5
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *NodeController) Run() {
	go func() {
		defer utilruntime.HandleCrash()

		if !cache.WaitForCacheSync(wait.NeverStop, nc.nodeInformer.Informer().HasSynced, nc.podInformer.Informer().HasSynced, nc.daemonSetInformer.Informer().HasSynced) {
			utilruntime.HandleError(errors.New("NodeController timed out while waiting for informers to sync..."))
			return
		}

		// Incorporate the results of node status pushed from kubelet to master.
		go wait.Until(func() {
			if err := nc.monitorNodeStatus(); err != nil {
				glog.Errorf("Error monitoring node status: %v", err)
			}
		}, nc.nodeMonitorPeriod, wait.NeverStop)

		// Managing eviction of nodes:
		// When we delete pods from a node, if the node was not empty at the time, we
		// queue an eviction watcher. If we hit an error, we retry the deletion.
		go wait.Until(func() {
			nc.evictorLock.Lock()
			defer nc.evictorLock.Unlock()
			for k := range nc.zonePodEvictor {
				nc.zonePodEvictor[k].Try(func(value TimedValue) (bool, time.Duration) {
					obj, exists, err := nc.nodeStore.GetByKey(value.Value)
					if err != nil {
						glog.Warningf("Failed to get Node %v from the nodeStore: %v", value.Value, err)
					} else if !exists {
						glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
					} else {
						node, _ := obj.(*v1.Node)
						zone := utilnode.GetZoneKey(node)
						EvictionsNumber.WithLabelValues(zone).Inc()
					}

					nodeUid, _ := value.UID.(string)
					remaining, err := deletePods(nc.kubeClient, nc.recorder, value.Value, nodeUid, nc.daemonSetStore)
					if err != nil {
						utilruntime.HandleError(fmt.Errorf("unable to evict node %q: %v", value.Value, err))
						return false, 0
					}

					if remaining {
						glog.Infof("Pods awaiting deletion due to NodeController eviction")
					}
					return true, 0
				})
			}
		}, nodeEvictionPeriod, wait.NeverStop)
	}()
}
// Run starts all of this controller's control loops
func (ctrl *PersistentVolumeController) Run(stopCh <-chan struct{}) {
	glog.V(1).Infof("starting PersistentVolumeController")
	ctrl.initializeCaches(ctrl.volumeSource, ctrl.claimSource)
	go ctrl.volumeController.Run(stopCh)
	go ctrl.claimController.Run(stopCh)
	go ctrl.classReflector.RunUntil(stopCh)
	go wait.Until(ctrl.volumeWorker, time.Second, stopCh)
	go wait.Until(ctrl.claimWorker, time.Second, stopCh)

	<-stopCh

	ctrl.claimQueue.ShutDown()
	ctrl.volumeQueue.ShutDown()
}
func (fdc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
	go fdc.deploymentController.Run(stopCh)
	fdc.fedDeploymentInformer.Start()
	fdc.fedPodInformer.Start()

	fdc.deploymentDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) {
		fdc.deploymentWorkQueue.Add(item.Key)
	})
	fdc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) {
		fdc.reconcileDeploymentsOnClusterChange()
	})

	// Wait until the cluster is synced to prevent the update storm at the very beginning.
	for !fdc.isSynced() {
		time.Sleep(5 * time.Millisecond)
		glog.V(3).Infof("Waiting for controller to sync up")
	}

	for i := 0; i < workers; i++ {
		go wait.Until(fdc.worker, time.Second, stopCh)
	}

	fedutil.StartBackoffGC(fdc.deploymentBackoff, stopCh)

	<-stopCh
	glog.Infof("Shutting down DeploymentController")
	fdc.deploymentDeliverer.Stop()
	fdc.clusterDeliverer.Stop()
	fdc.deploymentWorkQueue.ShutDown()
	fdc.fedDeploymentInformer.Stop()
	fdc.fedPodInformer.Stop()
}
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer e.queue.ShutDown()

	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)

	if !cache.WaitForCacheSync(stopCh, e.podStoreSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()

	if e.internalPodInformer != nil {
		go e.internalPodInformer.Run(stopCh)
	}

	<-stopCh
}
Example #9
func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps) {
	// start the kubelet
	go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)

	// start the kubelet server
	if kubeCfg.EnableServer {
		go wait.Until(func() {
			k.ListenAndServe(net.ParseIP(kubeCfg.Address), uint(kubeCfg.Port), kubeDeps.TLSOptions, kubeDeps.Auth, kubeCfg.EnableDebuggingHandlers)
		}, 0, wait.NeverStop)
	}
	if kubeCfg.ReadOnlyPort > 0 {
		go wait.Until(func() {
			k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
		}, 0, wait.NeverStop)
	}
}
Example #10
// Start starts the resource collector, connects to the standalone cAdvisor pod,
// and then repeatedly runs collectStats.
func (r *ResourceCollector) Start() {
	// Get the cgroup container names for kubelet and docker; check each error
	// separately so a kubelet lookup failure is not masked by the docker lookup.
	kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
	if err != nil {
		framework.Failf("Failed to get kubelet container name in test-e2e-node resource collector: %v", err)
	}
	dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
	if err != nil {
		framework.Failf("Failed to get docker container name in test-e2e-node resource collector: %v", err)
	}
	systemContainers = map[string]string{
		stats.SystemContainerKubelet: kubeletContainer,
		stats.SystemContainerRuntime: dockerContainer,
	}

	wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
		var err error
		r.client, err = cadvisorclient.NewClient(fmt.Sprintf("http://localhost:%d/", cadvisorPort))
		if err == nil {
			return true, nil
		}
		return false, err
	})

	Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")

	r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
	r.stopCh = make(chan struct{})

	oldStatsMap := make(map[string]*cadvisorapiv2.ContainerStats)
	go wait.Until(func() { r.collectStats(oldStatsMap) }, r.pollingInterval, r.stopCh)
}
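wait.Poll(interval, timeout, condition) keeps invoking condition every interval until it returns true, returns a non-nil error, or the timeout expires; note that the example above returns the cAdvisor client error from the condition, which ends the polling rather than retrying it, and the Expect check below is what actually fails the test. A minimal stand-in for that contract, again not the upstream implementation:

package main

import (
	"errors"
	"fmt"
	"time"
)

// poll is a simplified stand-in for wait.Poll: invoke condition every interval
// until it returns true, returns an error, or the timeout elapses.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		time.Sleep(interval) // like wait.Poll, wait one interval before the first check
		done, err := condition()
		if err != nil {
			return err // a non-nil error stops the polling early
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
	}
}

func main() {
	start := time.Now()
	err := poll(100*time.Millisecond, time.Second, func() (bool, error) {
		return time.Since(start) > 300*time.Millisecond, nil
	})
	fmt.Println("poll finished:", err)
}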
Example #11
// RunUntil starts the controller until the provided ch is closed.
func (c *Repair) RunUntil(ch chan struct{}) {
	wait.Until(func() {
		if err := c.RunOnce(); err != nil {
			runtime.HandleError(err)
		}
	}, c.interval, ch)
}
func (frsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
	go frsc.replicaSetController.Run(stopCh)
	frsc.fedReplicaSetInformer.Start()
	frsc.fedPodInformer.Start()

	frsc.replicasetDeliverer.StartWithHandler(func(item *fedutil.DelayingDelivererItem) {
		frsc.replicasetWorkQueue.Add(item.Key)
	})
	frsc.clusterDeliverer.StartWithHandler(func(_ *fedutil.DelayingDelivererItem) {
		frsc.reconcileReplicaSetsOnClusterChange()
	})

	for !frsc.isSynced() {
		time.Sleep(5 * time.Millisecond)
	}

	for i := 0; i < workers; i++ {
		go wait.Until(frsc.worker, time.Second, stopCh)
	}

	fedutil.StartBackoffGC(frsc.replicaSetBackoff, stopCh)

	<-stopCh
	glog.Infof("Shutting down ReplicaSetController")
	frsc.replicasetDeliverer.Stop()
	frsc.clusterDeliverer.Stop()
	frsc.replicasetWorkQueue.ShutDown()
	frsc.fedReplicaSetInformer.Stop()
	frsc.fedPodInformer.Stop()
}
Example #13
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *CronJobController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting CronJob Manager")
	// Check things every 10 seconds.
	go wait.Until(jm.SyncAll, 10*time.Second, stopCh)
	<-stopCh
	glog.Infof("Shutting down CronJob Manager")
}
Example #14
// RunUntil starts a watch and handles watch events. Will restart the watch if it is closed.
// RunUntil starts a goroutine and returns immediately. It will exit when stopCh is closed.
func (r *Reflector) RunUntil(stopCh <-chan struct{}) {
	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
	go wait.Until(func() {
		if err := r.ListAndWatch(stopCh); err != nil {
			utilruntime.HandleError(err)
		}
	}, r.period, stopCh)
}
Example #15
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
// Run starts a goroutine and returns immediately.
func (r *Reflector) Run() {
	glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
	go wait.Until(func() {
		if err := r.ListAndWatch(wait.NeverStop); err != nil {
			utilruntime.HandleError(err)
		}
	}, r.period, wait.NeverStop)
}
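Run and RunUntil differ only in the stop channel they hand to wait.Until: RunUntil takes one from the caller, while Run passes wait.NeverStop, which is simply a channel that is never closed, so the reflector loops for the life of the process. A sketch of that idea, assuming only the standard library:

package main

import (
	"fmt"
	"time"
)

// neverStop mirrors wait.NeverStop: a channel that is never closed, so a
// select on it never fires and the loop only ends when the process does.
var neverStop <-chan struct{} = make(chan struct{})

func main() {
	go func() {
		for {
			select {
			case <-neverStop:
				return // unreachable: nothing ever closes the channel
			case <-time.After(500 * time.Millisecond):
				fmt.Println("still running")
			}
		}
	}()
	time.Sleep(2 * time.Second)
}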
// Run begins quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rq.rqController.Run(stopCh)
	// the controllers that replenish other resources to respond rapidly to state changes
	for _, replenishmentController := range rq.replenishmentControllers {
		go replenishmentController.Run(stopCh)
	}
	// the workers that chug through the quota calculation backlog
	for i := 0; i < workers; i++ {
		go wait.Until(rq.worker(rq.queue), time.Second, stopCh)
		go wait.Until(rq.worker(rq.missingUsageQueue), time.Second, stopCh)
	}
	// the timer for how often we do a full recalculation across all quotas
	go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
	glog.Infof("Shutting down ResourceQuotaController")
	rq.queue.ShutDown()
}
// Run starts observing the system with the specified number of workers.
func (nm *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go nm.controller.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(nm.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down NamespaceController")
	nm.queue.ShutDown()
}
// Run begins watching and syncing.
func (dc *DeploymentController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer dc.queue.ShutDown()
	defer dc.progressQueue.ShutDown()

	glog.Infof("Starting deployment controller")

	if !cache.WaitForCacheSync(stopCh, dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(dc.worker, time.Second, stopCh)
	}
	go wait.Until(dc.progressWorker, time.Second, stopCh)

	<-stopCh
	glog.Infof("Shutting down deployment controller")
}
Example #19
// Run begins watching and syncing.
func (e *quotaEvaluator) run() {
	defer utilruntime.HandleCrash()

	for i := 0; i < e.workers; i++ {
		go wait.Until(e.doWork, time.Second, e.stopCh)
	}
	<-e.stopCh
	glog.Infof("Shutting down quota evaluator")
	e.queue.ShutDown()
}
Example #20
// RunKubernetesNamespaces periodically makes sure that all internal namespaces exist
func (c *Controller) RunKubernetesNamespaces(ch chan struct{}) {
	wait.Until(func() {
		// Loop the system namespace list, and create them if they do not exist
		for _, ns := range c.SystemNamespaces {
			if err := c.CreateNamespaceIfNeeded(ns); err != nil {
				runtime.HandleError(fmt.Errorf("unable to create required kubernetes system namespace %s: %v", ns, err))
			}
		}
	}, c.SystemNamespacesInterval, ch)
}
Example #21
// Run begins watching and syncing.
func (cc *ClusterController) Run() {
	defer utilruntime.HandleCrash()
	go cc.clusterController.Run(wait.NeverStop)
	// monitor cluster status periodically, in phase 1 we just get the health state from "/healthz"
	go wait.Until(func() {
		if err := cc.UpdateClusterStatus(); err != nil {
			glog.Errorf("Error monitoring cluster status: %v", err)
		}
	}, cc.clusterMonitorPeriod, wait.NeverStop)
}
Example #22
// RunKubernetesService periodically updates the kubernetes service
func (c *Controller) RunKubernetesService(ch chan struct{}) {
	wait.Until(func() {
		// The service definition is not reconciled after the first
		// run; ports and type will be corrected only during
		// startup.
		if err := c.UpdateKubernetesService(false); err != nil {
			runtime.HandleError(fmt.Errorf("unable to sync kubernetes service: %v", err))
		}
	}, c.EndpointInterval, ch)
}
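Unlike most Run methods in this collection, RunKubernetesNamespaces and RunKubernetesService call wait.Until directly rather than with go, so they block until ch is closed and the caller is expected to start them on a goroutine of its own. A hypothetical, self-contained sketch of that calling convention (the type and names here are illustrative, not the actual bootstrap code):

package main

import (
	"fmt"
	"time"
)

// fakeController is a stand-in for illustration only; like the Run* methods
// above, its Run method blocks until ch is closed.
type fakeController struct{ interval time.Duration }

func (c *fakeController) Run(ch chan struct{}) {
	for {
		select {
		case <-ch:
			return
		case <-time.After(c.interval):
			fmt.Println("reconcile")
		}
	}
}

func main() {
	c := &fakeController{interval: 200 * time.Millisecond}
	stopCh := make(chan struct{})
	go c.Run(stopCh) // blocking method, so the caller runs it on its own goroutine
	time.Sleep(time.Second)
	close(stopCh) // stops the loop
	time.Sleep(50 * time.Millisecond)
}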
func (cm *containerManagerImpl) Start(node *v1.Node) error {
	// cache the node Info including resource capacity and
	// allocatable of the node
	cm.nodeInfo = node
	// Setup the node
	if err := cm.setupNode(); err != nil {
		return err
	}
	// Don't run a background thread if there are no ensureStateFuncs.
	hasEnsureStateFuncs := false
	for _, cont := range cm.systemContainers {
		if cont.ensureStateFunc != nil {
			hasEnsureStateFuncs = true
			break
		}
	}
	if hasEnsureStateFuncs {
		// Run ensure state functions every minute.
		go wait.Until(func() {
			for _, cont := range cm.systemContainers {
				if cont.ensureStateFunc != nil {
					if err := cont.ensureStateFunc(cont.manager); err != nil {
						glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
					}
				}
			}
		}, time.Minute, wait.NeverStop)

	}

	if len(cm.periodicTasks) > 0 {
		go wait.Until(func() {
			for _, task := range cm.periodicTasks {
				if task != nil {
					task()
				}
			}
		}, 5*time.Minute, wait.NeverStop)
	}

	return nil
}
Example #24
func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
	glog.V(0).Infof("Starting disruption controller")
	if dc.kubeClient != nil {
		glog.V(0).Infof("Sending events to api server.")
		dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
	} else {
		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
	}
	go dc.pdbController.Run(stopCh)
	go dc.podController.Run(stopCh)
	go dc.rcController.Run(stopCh)
	go dc.rsController.Run(stopCh)
	go dc.dController.Run(stopCh)
	go dc.ssController.Run(stopCh)
	go wait.Until(dc.worker, time.Second, stopCh)
	go wait.Until(dc.recheckWorker, time.Second, stopCh)

	<-stopCh
	glog.V(0).Infof("Shutting down disruption controller")
}
Example #25
// Run runs the statefulset controller.
func (psc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting statefulset controller")
	go psc.podController.Run(stopCh)
	go psc.psController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(psc.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down statefulset controller")
	psc.queue.ShutDown()
}
func (m *containerManager) Start() error {
	// TODO: check if the required cgroups are mounted.
	if len(m.cgroupsName) != 0 {
		manager, err := createCgroupManager(m.cgroupsName)
		if err != nil {
			return err
		}
		m.cgroupsManager = manager
	}
	go wait.Until(m.doWork, 5*time.Minute, wait.NeverStop)
	return nil
}
Example #27
// nodesSyncLoop lists nodes every 15 seconds, calling Update() on the TunnelList
// each time (Update() is a noop if no changes are necessary).
func (c *SSHTunneler) nodesSyncLoop() {
	// TODO (cjcullen) make this watch.
	go wait.Until(func() {
		addrs, err := c.getAddresses()
		glog.V(4).Infof("Calling update w/ addrs: %v", addrs)
		if err != nil {
			glog.Errorf("Failed to getAddresses: %v", err)
		}
		c.tunnels.Update(addrs)
		atomic.StoreInt64(&c.lastSync, c.clock.Now().Unix())
	}, 15*time.Second, c.stopChan)
}
Example #28
// Create a new Cacher responsible for servicing WATCH and LIST requests from its
// internal cache and updating its cache in the background based on the given
// configuration.
func NewCacherFromConfig(config CacherConfig) *Cacher {
	watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc, config.GetAttrsFunc)
	listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)

	// Give this error when it is constructed rather than when you get the
	// first watch item, because it's much easier to track down that way.
	if obj, ok := config.Type.(runtime.Object); ok {
		if err := runtime.CheckCodec(config.Codec, obj); err != nil {
			panic("storage codec doesn't seem to match given type: " + err.Error())
		}
	}

	stopCh := make(chan struct{})
	cacher := &Cacher{
		ready:       newReady(),
		storage:     config.Storage,
		objectType:  reflect.TypeOf(config.Type),
		watchCache:  watchCache,
		reflector:   cache.NewReflector(listerWatcher, config.Type, watchCache, 0),
		versioner:   config.Versioner,
		triggerFunc: config.TriggerPublisherFunc,
		watcherIdx:  0,
		watchers: indexedWatchers{
			allWatchers:   make(map[int]*cacheWatcher),
			valueWatchers: make(map[string]watchersMap),
		},
		// TODO: Figure out the correct value for the buffer size.
		incoming:              make(chan watchCacheEvent, 100),
		dispatchTimeoutBudget: newTimeBudget(stopCh),
		// We need to (potentially) stop both:
		// - wait.Until go-routine
		// - reflector.ListAndWatch
		// and there are no guarantees on the order that they will stop.
		// So we will be simply closing the channel, and synchronizing on the WaitGroup.
		stopCh: stopCh,
	}
	watchCache.SetOnEvent(cacher.processEvent)
	go cacher.dispatchEvents()

	cacher.stopWg.Add(1)
	go func() {
		defer cacher.stopWg.Done()
		wait.Until(
			func() {
				if !cacher.isStopped() {
					cacher.startCaching(stopCh)
				}
			}, time.Second, stopCh,
		)
	}()
	return cacher
}
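The comment inside the constructor spells out the shutdown problem: the wait.Until goroutine and reflector.ListAndWatch both have to stop, with no guarantee about the order, so the Cacher closes a single stop channel and then waits on a WaitGroup. A self-contained sketch of that close-the-channel-plus-WaitGroup pattern (not the Cacher code itself):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	stopCh := make(chan struct{})
	var wg sync.WaitGroup

	// Start two independent loops; both watch the same stop channel.
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				select {
				case <-stopCh:
					fmt.Printf("loop %d stopping\n", id)
					return
				case <-time.After(100 * time.Millisecond):
					// periodic work would go here
				}
			}
		}(i)
	}

	time.Sleep(300 * time.Millisecond)
	close(stopCh) // signal every loop at once, in no particular order
	wg.Wait()     // block until all loops have observed the close and returned
}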
// Run the main goroutine responsible for watching and syncing certificate signing requests.
func (cc *CertificateController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer cc.queue.ShutDown()

	go cc.csrController.Run(stopCh)

	glog.Infof("Starting certificate controller manager")
	for i := 0; i < workers; i++ {
		go wait.Until(cc.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down certificate controller")
}
func (c *APIServiceRegistrationController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()
	defer glog.Infof("Shutting down APIServiceRegistrationController")

	glog.Infof("Starting APIServiceRegistrationController")

	// only start one worker thread since it's a slow-moving API and the bits added to the
	// discovery server aren't thread-safe
	go wait.Until(c.runWorker, time.Second, stopCh)

	<-stopCh
}