Example #1
// Run starts the healthchecker main loop.
func Run() {
	healthchecker = proxyHealthCheckFactory()
	// Wrap with wait.Forever to handle panics.
	go wait.Forever(func() {
		healthchecker.handlerLoop()
	}, 0)
}
Example #2
// Start eager background caching of volume stats.
func (s *fsResourceAnalyzer) Start() {
	s.startOnce.Do(func() {
		if s.calcPeriod <= 0 {
			glog.Info("Volume stats collection disabled.")
			return
		}
		glog.Info("Starting FS ResourceAnalyzer")
		go wait.Forever(func() { s.updateCachedPodVolumeStats() }, s.calcPeriod)
	})
}
Example #3
// RegisterMetricAndTrackRateLimiterUsage registers a metric ownerName_rate_limiter_use in Prometheus to track
// how saturated the rateLimiter is, and starts a goroutine that updates this metric every updatePeriod.
func RegisterMetricAndTrackRateLimiterUsage(ownerName string, rateLimiter flowcontrol.RateLimiter) error {
	err := registerRateLimiterMetric(ownerName)
	if err != nil {
		return err
	}
	go wait.Forever(func() {
		metricsLock.Lock()
		defer metricsLock.Unlock()
		rateLimiterMetrics[ownerName].Set(rateLimiter.Saturation())
	}, updatePeriod)
	return nil
}
Example #4
// newPortRangeAllocator returns a PortAllocator over the given port range. When
// autoFill is true, a background loop keeps the ports channel topped up with free ports.
func newPortRangeAllocator(r net.PortRange, autoFill bool) PortAllocator {
	if r.Base == 0 || r.Size == 0 {
		panic("illegal argument: may not specify an empty port range")
	}
	ra := &rangeAllocator{
		PortRange: r,
		ports:     make(chan int, portsBufSize),
		rand:      rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	if autoFill {
		go wait.Forever(func() { ra.fillPorts() }, nextFreePortCooldown)
	}
	return ra
}
Example #5
// Init looks up the nsenter binary, records the host, and starts periodic syncing of the CNI network config.
func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode componentconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
	var err error
	plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
	if err != nil {
		return err
	}

	plugin.host = host

	// sync network config from pluginDir periodically to detect network config updates
	go wait.Forever(func() {
		plugin.syncNetworkConfig()
	}, 10*time.Second)
	return nil
}
Example #6
// postStartHookFunc builds a loopback client and starts a loop that syncs third party resources every ten seconds.
func (p RESTStorageProvider) postStartHookFunc(hookContext genericapiserver.PostStartHookContext) error {
	clientset, err := extensionsclient.NewForConfig(hookContext.LoopbackClientConfig)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("unable to initialize clusterroles: %v", err))
		return nil
	}

	thirdPartyControl := ThirdPartyController{
		master: p.ResourceInterface,
		client: clientset,
	}
	go wait.Forever(func() {
		if err := thirdPartyControl.SyncResources(); err != nil {
			glog.Warningf("third party resource sync failed: %v", err)
		}
	}, 10*time.Second)

	return nil
}
Example #7
func (m *manager) Start() {
	// Don't start the status manager if we don't have a client. This will happen
	// on the master, where the kubelet is responsible for bootstrapping the pods
	// of the master components.
	if m.kubeClient == nil {
		glog.Infof("Kubernetes client is nil, not starting status manager.")
		return
	}

	glog.Info("Starting to sync pod status with apiserver")
	syncTicker := time.Tick(syncPeriod)
	// syncPod and syncBatch share the same goroutine to avoid sync races.
	go wait.Forever(func() {
		select {
		case syncRequest := <-m.podStatusChannel:
			m.syncPod(syncRequest.podUID, syncRequest.status)
		case <-syncTicker:
			m.syncBatch()
		}
	}, 0)
}
Example #8
// NewSourceFile starts watching the file at path for pod manifests, sending updates on the updates channel.
func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) {
	config := new(path, nodeName, period, updates)
	glog.V(1).Infof("Watching path %q", path)
	go wait.Forever(config.run, period)
}
Example #9
// Start syncing probe status. This should only be called once.
func (m *manager) Start() {
	// Start syncing readiness.
	go wait.Forever(m.updateReadiness, 0)
}
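
Every example above follows the same fire-and-forget pattern: go wait.Forever(f, period). wait.Forever runs f, sleeps for period, and repeats for the lifetime of the process; since it never returns, callers start it on its own goroutine. A period of 0 re-runs f as soon as it returns, which is why the examples that pass 0 pair it with bodies that block or loop internally. Below is a minimal self-contained sketch of the pattern, assuming the import path k8s.io/apimachinery/pkg/util/wait where the package lives in current Kubernetes; the snippets above predate that move and vendored it from the main tree.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Start a background loop that runs every two seconds for the lifetime
	// of the process. wait.Forever never returns, so it is launched on its
	// own goroutine, exactly as in the examples above.
	go wait.Forever(func() {
		fmt.Println("tick:", time.Now().Format(time.RFC3339))
	}, 2*time.Second)

	// Keep main alive long enough to observe a few iterations.
	time.Sleep(7 * time.Second)
}

In the wait package, Forever is simply Until with wait.NeverStop; loops that need to shut down cleanly use wait.Until with a real stop channel instead.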