Example no. 1
// finishRequest makes a given resultFunc asynchronous and handles errors returned by the response.
// Any api.Status object returned is considered an "error", which interrupts the normal response flow.
func finishRequest(timeout time.Duration, fn resultFunc) (result runtime.Object, err error) {
	// these channels need to be buffered so the goroutine below never blocks on
	// its send, even when the select statement below has already returned on a
	// different case.
	ch := make(chan runtime.Object, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)
	go func() {
		// panics don't cross goroutine boundaries, so we have to handle them ourselves
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			// Propagate to parent goroutine
			panicCh <- panicReason
		})

		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			ch <- result
		}
	}()

	select {
	case result = <-ch:
		if status, ok := result.(*unversioned.Status); ok {
			return nil, errors.FromObject(status)
		}
		return result, nil
	case err = <-errCh:
		return nil, err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return nil, errors.NewTimeoutError("request did not complete within allowed duration", 0)
	}
}
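
Stripped of the Kubernetes-specific types, the example above boils down to a reusable pattern: run the work in its own goroutine, recover any panic there and re-raise it in the caller, and buffer every channel so the goroutine can always complete its send even after the timeout case has already won the select. Below is a minimal, self-contained sketch of that pattern; runWithTimeout, its string result type, and the error message are hypothetical and purely illustrative.

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout is a simplified sketch of the same pattern: run fn in a
// goroutine, recover its panics, and use buffered channels so the goroutine
// can always finish its send even if the caller has already timed out.
func runWithTimeout(timeout time.Duration, fn func() (string, error)) (string, error) {
	resultCh := make(chan string, 1)
	errCh := make(chan error, 1)
	panicCh := make(chan interface{}, 1)

	go func() {
		defer func() {
			if r := recover(); r != nil {
				panicCh <- r // propagate the panic to the caller's goroutine
			}
		}()
		if result, err := fn(); err != nil {
			errCh <- err
		} else {
			resultCh <- result
		}
	}()

	select {
	case result := <-resultCh:
		return result, nil
	case err := <-errCh:
		return "", err
	case p := <-panicCh:
		panic(p)
	case <-time.After(timeout):
		return "", errors.New("request did not complete within allowed duration")
	}
}

func main() {
	out, err := runWithTimeout(100*time.Millisecond, func() (string, error) {
		time.Sleep(10 * time.Millisecond)
		return "ok", nil
	})
	fmt.Println(out, err)
}
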
Example no. 2
// Update reconciles the list's entries with the specified addresses. Existing
// tunnels that are not in addresses are removed from entries and closed in a
// background goroutine. New tunnels specified in addresses are opened in a
// background goroutine and then added to entries.
func (l *SSHTunnelList) Update(addrs []string) {
	haveAddrsMap := make(map[string]bool)
	wantAddrsMap := make(map[string]bool)
	func() {
		l.tunnelsLock.Lock()
		defer l.tunnelsLock.Unlock()
		// Build a map of what we currently have.
		for i := range l.entries {
			haveAddrsMap[l.entries[i].Address] = true
		}
		// Determine any necessary additions.
		for i := range addrs {
			// Add tunnel if it is not in l.entries or l.adding
			if _, ok := haveAddrsMap[addrs[i]]; !ok {
				if _, ok := l.adding[addrs[i]]; !ok {
					l.adding[addrs[i]] = true
					addr := addrs[i]
					go func() {
						defer runtime.HandleCrash()
						// Actually adding tunnel to list will block until lock
						// is released after deletions.
						l.createAndAddTunnel(addr)
					}()
				}
			}
			wantAddrsMap[addrs[i]] = true
		}
		// Determine any necessary deletions.
		var newEntries []sshTunnelEntry
		for i := range l.entries {
			if _, ok := wantAddrsMap[l.entries[i].Address]; !ok {
				tunnelEntry := l.entries[i]
				glog.Infof("Removing tunnel to deleted node at %q", tunnelEntry.Address)
				go func() {
					defer runtime.HandleCrash()
					if err := tunnelEntry.Tunnel.Close(); err != nil {
						glog.Errorf("Failed to close tunnel to %q: %v", tunnelEntry.Address, err)
					}
				}()
			} else {
				newEntries = append(newEntries, l.entries[i])
			}
		}
		l.entries = newEntries
	}()
}
Example no. 3
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
// as a goroutine.
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
	defer utilruntime.HandleCrash()
	defer close(w.etcdError)
	defer close(w.etcdIncoming)

	// All calls to etcd are coming from this function - once it is finished
	// no other call to etcd should be generated by this watcher.
	done := func() {}

	// We need to be prepared for Stop() being called at any time.
	// It can even be called before this function runs.
	// If that is the case, we simply skip all the code here.
	// See #18928 for more details.
	var watcher etcd.Watcher
	returned := func() bool {
		w.stopLock.Lock()
		defer w.stopLock.Unlock()
		if w.stopped {
			// Watcher has already been stopped - don't even initiate it here.
			return true
		}
		w.wg.Add(1)
		done = w.wg.Done
		// Perform initialization of watcher under lock - we want to avoid situation when
		// Stop() is called in the meantime (which in tests can cause etcd termination and
		// strange behavior here).
		if resourceVersion == 0 {
			latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming)
			if err != nil {
				w.etcdError <- err
				return true
			}
			resourceVersion = latest
		}

		opts := etcd.WatcherOptions{
			Recursive:  w.list,
			AfterIndex: resourceVersion,
		}
		watcher = client.Watcher(key, &opts)
		w.ctx, w.cancel = context.WithCancel(ctx)
		return false
	}()
	defer done()
	if returned {
		return
	}

	for {
		resp, err := watcher.Next(w.ctx)
		if err != nil {
			w.etcdError <- err
			return
		}
		w.etcdIncoming <- resp
	}
}
Example no. 4
func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
	defer runtime.HandleCrash()
	glog.Infof("Starting Attach Detach Controller")

	go adc.reconciler.Run(stopCh)
	go adc.desiredStateOfWorldPopulator.Run(stopCh)

	<-stopCh
	glog.Infof("Shutting down Attach Detach Controller")
}
Example no. 5
func (vm *volumeManager) Run(stopCh <-chan struct{}) {
	defer runtime.HandleCrash()
	glog.Infof("Starting Kubelet Volume Manager")

	go vm.reconciler.Run(stopCh)
	go vm.desiredStateOfWorldPopulator.Run(stopCh)

	<-stopCh
	glog.Infof("Shutting down Kubelet Volume Manager")
}
Example no. 6
// Run starts the leader election loop
func (le *LeaderElector) Run() {
	defer func() {
		runtime.HandleCrash()
		le.config.Callbacks.OnStoppedLeading()
	}()
	le.acquire()
	stop := make(chan struct{})
	go le.config.Callbacks.OnStartedLeading(stop)
	le.renew()
	close(stop)
}
Example no. 7
// Runs e; will not return until stopCh is closed. workers determines how many
// endpoints will be handled in parallel.
func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go e.serviceController.Run(stopCh)
	go e.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(e.worker, time.Second, stopCh)
	}
	go func() {
		defer utilruntime.HandleCrash()
		time.Sleep(5 * time.Minute) // give time for our cache to fill
		e.checkLeftoverEndpoints()
	}()

	if e.internalPodInformer != nil {
		go e.internalPodInformer.Run(stopCh)
	}

	<-stopCh
	e.queue.ShutDown()
}
Example no. 8
// Run begins watching and syncing.
func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rsc.rsController.Run(stopCh)
	go rsc.podController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(rsc.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down ReplicaSet Controller")
	rsc.queue.ShutDown()
}
Example no. 9
func (l *SSHTunnelList) delayedHealthCheck(e sshTunnelEntry, delay time.Duration) {
	go func() {
		defer runtime.HandleCrash()
		time.Sleep(delay)
		if err := l.healthCheck(e); err != nil {
			glog.Errorf("Healthcheck failed for tunnel to %q: %v", e.Address, err)
			glog.Infof("Attempting once to re-establish tunnel to %q", e.Address)
			l.removeAndReAdd(e)
		}
	}()
}
Example no. 10
// Run runs the petset controller.
func (psc *PetSetController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting petset controller")
	go psc.podController.Run(stopCh)
	go psc.psController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(psc.worker, time.Second, stopCh)
	}
	<-stopCh
	glog.Infof("Shutting down petset controller")
	psc.queue.ShutDown()
}
Example no. 11
// translate pulls events from etcd, converts them, and pushes them onto the outgoing channel. Meant to be
// called as a goroutine.
func (w *etcdWatcher) translate() {
	defer w.wg.Done()
	defer close(w.outgoing)
	defer utilruntime.HandleCrash()

	for {
		select {
		case err := <-w.etcdError:
			if err != nil {
				var status *unversioned.Status
				switch {
				case etcdutil.IsEtcdWatchExpired(err):
					status = &unversioned.Status{
						Status:  unversioned.StatusFailure,
						Message: err.Error(),
						Code:    http.StatusGone, // Gone
						Reason:  unversioned.StatusReasonExpired,
					}
				// TODO: need to generate errors using api/errors which has a circular dependency on this package
				//   no other way to inject errors
				// case etcdutil.IsEtcdUnreachable(err):
				//   status = errors.NewServerTimeout(...)
				default:
					status = &unversioned.Status{
						Status:  unversioned.StatusFailure,
						Message: err.Error(),
						Code:    http.StatusInternalServerError,
						Reason:  unversioned.StatusReasonInternalError,
					}
				}
				w.emit(watch.Event{
					Type:   watch.Error,
					Object: status,
				})
			}
			return
		case <-w.userStop:
			return
		case res, ok := <-w.etcdIncoming:
			if ok {
				if curLen := int64(len(w.etcdIncoming)); watchChannelHWM.Update(curLen) {
					// Monitor if this gets backed up, and how much.
					glog.V(2).Infof("watch: %v objects queued in channel.", curLen)
				}
				w.sendResult(res)
			}
			// If !ok, don't return here; we must wait for the etcdError channel
			// to deliver an error or be closed.
		}
	}
}
Example no. 12
// Run the main goroutine responsible for watching and syncing jobs.
func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go jm.jobController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(jm.worker, time.Second, stopCh)
	}

	if jm.internalPodInformer != nil {
		go jm.internalPodInformer.Run(stopCh)
	}

	<-stopCh
	glog.Infof("Shutting down Job Manager")
	jm.queue.ShutDown()
}
Example no. 13
func (d *timeoutDialer) Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
	var client *ssh.Client
	errCh := make(chan error, 1)
	go func() {
		defer runtime.HandleCrash()
		var err error
		client, err = d.dialer.Dial(network, addr, config)
		errCh <- err
	}()
	select {
	case err := <-errCh:
		return client, err
	case <-time.After(d.timeout):
		return nil, fmt.Errorf("timed out dialing %s:%s", network, addr)
	}
}
Example no. 14
// Run begins the quota controller using the specified number of workers
func (rq *ResourceQuotaController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	go rq.rqController.Run(stopCh)
	// the controllers that replenish other resources to respond rapidly to state changes
	for _, replenishmentController := range rq.replenishmentControllers {
		go replenishmentController.Run(stopCh)
	}
	// the workers that chug through the quota calculation backlog
	for i := 0; i < workers; i++ {
		go wait.Until(rq.worker, time.Second, stopCh)
	}
	// the timer for how often we do a full recalculation across all quotas
	go wait.Until(func() { rq.enqueueAll() }, rq.resyncPeriod(), stopCh)
	<-stopCh
	glog.Infof("Shutting down ResourceQuotaController")
	rq.queue.ShutDown()
}
Example no. 15
func (cc *cadvisorClient) exportHTTP(port uint) error {
	// Register the handlers regardless as this registers the prometheus
	// collector properly.
	mux := http.NewServeMux()
	err := cadvisorhttp.RegisterHandlers(mux, cc, "", "", "", "")
	if err != nil {
		return err
	}

	re := regexp.MustCompile(`^k8s_(?P<kubernetes_container_name>[^_\.]+)[^_]+_(?P<kubernetes_pod_name>[^_]+)_(?P<kubernetes_namespace>[^_]+)`)
	reCaptureNames := re.SubexpNames()
	cadvisorhttp.RegisterPrometheusHandler(mux, cc, "/metrics", func(name string) map[string]string {
		extraLabels := map[string]string{}
		matches := re.FindStringSubmatch(name)
		for i, match := range matches {
			if len(reCaptureNames[i]) > 0 {
				extraLabels[re.SubexpNames()[i]] = match
			}
		}
		return extraLabels
	})

	// Only start the http server if port > 0
	if port > 0 {
		serv := &http.Server{
			Addr:    fmt.Sprintf(":%d", port),
			Handler: mux,
		}

		// TODO(vmarmol): Remove this when the cAdvisor port is once again free.
		// If export failed, retry in the background until we are able to bind.
		// This allows an existing cAdvisor to be killed before this one registers.
		go func() {
			defer runtime.HandleCrash()

			err := serv.ListenAndServe()
			for err != nil {
				glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err)
				time.Sleep(time.Minute)
				err = serv.ListenAndServe()
			}
		}()
	}

	return nil
}
Example no. 16
// Run begins watching and syncing daemon sets.
func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting Daemon Sets controller manager")
	go dsc.dsController.Run(stopCh)
	go dsc.podController.Run(stopCh)
	go dsc.nodeController.Run(stopCh)
	for i := 0; i < workers; i++ {
		go wait.Until(dsc.runWorker, time.Second, stopCh)
	}

	if dsc.internalPodInformer != nil {
		go dsc.internalPodInformer.Run(stopCh)
	}

	<-stopCh
	glog.Infof("Shutting down Daemon Set Controller")
	dsc.queue.ShutDown()
}
Example no. 17
func (c *cacheWatcher) process(initEvents []watchCacheEvent, resourceVersion uint64) {
	defer utilruntime.HandleCrash()

	for _, event := range initEvents {
		c.sendWatchCacheEvent(event)
	}
	defer close(c.result)
	defer c.Stop()
	for {
		event, ok := <-c.input
		if !ok {
			return
		}
		// only send events newer than resourceVersion
		if event.ResourceVersion > resourceVersion {
			c.sendWatchCacheEvent(event)
		}
	}
}
Example no. 18
// JitterUntil loops until stop channel is closed, running f every period.
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged.
// Catches any panics, and keeps going. f may not be invoked if the
// stop channel is already closed. Pass NeverStop if you don't want it
// to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
	select {
	case <-stopCh:
		return
	default:
	}

	for {
		jitteredPeriod := period
		if jitterFactor > 0.0 {
			jitteredPeriod = Jitter(period, jitterFactor)
		}

		var t *time.Timer
		if !sliding {
			t = time.NewTimer(jitteredPeriod)
		}

		func() {
			defer runtime.HandleCrash()
			f()
		}()

		if sliding {
			t = time.NewTimer(jitteredPeriod)
		} else {
			// The timer we created could already have fired, so be
			// careful and check stopCh first.
			select {
			case <-stopCh:
				return
			default:
			}
		}

		select {
		case <-stopCh:
			return
		case <-t.C:
		}
	}
}
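
A hedged usage sketch of JitterUntil follows, assuming this vintage of the tree keeps the function in k8s.io/kubernetes/pkg/util/wait (the import path is an assumption): run a function roughly once per second with up to 20% jitter until the stop channel is closed.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait" // import path is an assumption for this era of the tree
)

func main() {
	stopCh := make(chan struct{})

	// Close the stop channel after about five seconds so the loop exits.
	go func() {
		time.Sleep(5 * time.Second)
		close(stopCh)
	}()

	// Run f about once per second with up to 20% jitter. sliding=false means the
	// timer is started before f runs, so f's own duration does not stretch the period.
	wait.JitterUntil(func() {
		fmt.Println("tick at", time.Now().Format(time.RFC3339))
	}, time.Second, 0.2, false, stopCh)

	fmt.Println("stopped")
}
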
Example no. 19
// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function.
// The return value can be ignored or used to stop recording, if desired.
func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*api.Event)) watch.Interface {
	watcher := eventBroadcaster.Watch()
	go func() {
		defer utilruntime.HandleCrash()
		for {
			watchEvent, open := <-watcher.ResultChan()
			if !open {
				return
			}
			event, ok := watchEvent.Object.(*api.Event)
			if !ok {
				// This is all local, so there's no reason this should
				// ever happen.
				continue
			}
			eventHandler(event)
		}
	}()
	return watcher
}
Example no. 20
// Parallelize is a very simple framework that allows for parallelizing
// N independent pieces of work.
func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	wg := sync.WaitGroup{}
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer utilruntime.HandleCrash()
			defer wg.Done()
			for piece := range toProcess {
				doWorkPiece(piece)
			}
		}()
	}
	wg.Wait()
}
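
A hedged usage sketch of Parallelize, assuming it lives in the workqueue package of this vintage of the tree (the k8s.io/kubernetes/pkg/util/workqueue import path is an assumption): each piece index selects one item, and the call returns only after all workers have finished.

package main

import (
	"fmt"
	"sync/atomic"

	"k8s.io/kubernetes/pkg/util/workqueue" // import path is an assumption for this era of the tree
)

func main() {
	items := []string{"a", "b", "c", "d", "e", "f"}
	var processed int64

	// Six pieces of work spread across three workers; Parallelize blocks until all are done.
	workqueue.Parallelize(3, len(items), func(piece int) {
		fmt.Printf("worker handling item %q\n", items[piece])
		atomic.AddInt64(&processed, 1)
	})

	fmt.Println("processed", atomic.LoadInt64(&processed), "items")
}
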
Example no. 21
func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
	ref, err := api.GetReference(object)
	if err != nil {
		glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
		return
	}

	if !validateEventType(eventtype) {
		glog.Errorf("Unsupported event type: '%v'", eventtype)
		return
	}

	event := recorder.makeEvent(ref, eventtype, reason, message)
	event.Source = recorder.source

	go func() {
		// NOTE: events should be a non-blocking operation
		defer utilruntime.HandleCrash()
		recorder.Action(watch.Added, event)
	}()
}
Example no. 22
// Apply the new setting to the specified pod.
// If the options provide an OnCompleteFunc, the function is invoked if the update is accepted.
// Update requests are ignored if a kill pod request is pending.
func (p *podWorkers) UpdatePod(options *UpdatePodOptions) {
	pod := options.Pod
	uid := pod.UID
	var podUpdates chan UpdatePodOptions
	var exists bool

	p.podLock.Lock()
	defer p.podLock.Unlock()
	if podUpdates, exists = p.podUpdates[uid]; !exists {
		// We need a buffer here because the checkForUpdates() method that
		// puts an update into the channel is called from the same goroutine
		// that consumes the channel. However, it is guaranteed that in such
		// a case the channel is empty, so a buffer of size 1 is enough.
		podUpdates = make(chan UpdatePodOptions, 1)
		p.podUpdates[uid] = podUpdates

		// Creating a new pod worker either means this is a new pod, or that the
		// kubelet just restarted. In either case the kubelet is willing to believe
		// the status of the pod for the first pod worker sync. See corresponding
		// comment in syncPod.
		go func() {
			defer runtime.HandleCrash()
			p.managePodLoop(podUpdates)
		}()
	}
	if !p.isWorking[pod.UID] {
		p.isWorking[pod.UID] = true
		podUpdates <- *options
	} else {
		// if a request to kill a pod is pending, we do not let anything overwrite that request.
		update, found := p.lastUndeliveredWorkUpdate[pod.UID]
		if !found || update.UpdateType != kubetypes.SyncPodKill {
			p.lastUndeliveredWorkUpdate[pod.UID] = *options
		}
	}
}
Example no. 23
// Watches cAdvisor for system OOMs and records an event for every system OOM encountered.
func (ow *realOOMWatcher) Start(ref *api.ObjectReference) error {
	request := events.Request{
		EventType: map[cadvisorapi.EventType]bool{
			cadvisorapi.EventOom: true,
		},
		ContainerName:        "/",
		IncludeSubcontainers: false,
	}
	eventChannel, err := ow.cadvisor.WatchEvents(&request)
	if err != nil {
		return err
	}

	go func() {
		defer runtime.HandleCrash()

		for event := range eventChannel.GetChannel() {
			glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
			ow.recorder.PastEventf(ref, unversioned.Time{Time: event.Timestamp}, api.EventTypeWarning, systemOOMEvent, "System OOM encountered")
		}
		glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
	}()
	return nil
}
Example no. 24
// waitingLoop runs until the workqueue is shut down and keeps a check on the list of items to be added.
func (q *delayingType) waitingLoop() {
	defer utilruntime.HandleCrash()

	// Make a placeholder channel to use when there are no items in our list
	never := make(<-chan time.Time)

	for {
		if q.Interface.ShuttingDown() {
			// discard waiting entries
			q.waitingForAdd = nil
			return
		}

		now := q.clock.Now()

		// Add ready entries
		readyEntries := 0
		for _, entry := range q.waitingForAdd {
			if entry.readyAt.After(now) {
				break
			}
			q.Add(entry.data)
			readyEntries++
		}
		q.waitingForAdd = q.waitingForAdd[readyEntries:]

		// Set up a wait for the first item's readyAt (if one exists)
		nextReadyAt := never
		if len(q.waitingForAdd) > 0 {
			nextReadyAt = q.clock.After(q.waitingForAdd[0].readyAt.Sub(now))
		}

		select {
		case <-q.stopCh:
			return

		case <-q.heartbeat:
			// continue the loop, which will add ready items

		case <-nextReadyAt:
			// continue the loop, which will add ready items

		case waitEntry := <-q.waitingForAddCh:
			if waitEntry.readyAt.After(q.clock.Now()) {
				q.waitingForAdd = insert(q.waitingForAdd, waitEntry)
			} else {
				q.Add(waitEntry.data)
			}

			drained := false
			for !drained {
				select {
				case waitEntry := <-q.waitingForAddCh:
					if waitEntry.readyAt.After(q.clock.Now()) {
						q.waitingForAdd = insert(q.waitingForAdd, waitEntry)
					} else {
						q.Add(waitEntry.data)
					}
				default:
					drained = true
				}
			}
		}
	}
}
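
A hedged usage sketch of the delaying queue whose waitingLoop appears above; the NewDelayingQueue constructor, the AddAfter/Get/Done/ShutDown methods, and the k8s.io/kubernetes/pkg/util/workqueue import path are all assumptions about this vintage of the package. An item added with AddAfter only becomes visible to Get once its delay has elapsed.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/workqueue" // package path and constructor are assumptions for this era of the tree
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	start := time.Now()
	// Ask for the item to become ready roughly two seconds from now.
	q.AddAfter("retry-me", 2*time.Second)

	// Get blocks until the item becomes ready and is added to the underlying queue.
	item, shutdown := q.Get()
	if shutdown {
		return
	}
	fmt.Printf("got %v after %v\n", item, time.Since(start))
	q.Done(item)
}
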
Example no. 25
// monitorNodeStatus verifies that node statuses are constantly updated by the kubelet, and if not,
// posts "NodeReady==ConditionUnknown". It also evicts all pods if the node is not ready or
// not reachable for a long period of time.
func (nc *NodeController) monitorNodeStatus() error {
	nodes, err := nc.kubeClient.Core().Nodes().List(api.ListOptions{})
	if err != nil {
		return err
	}
	for _, node := range nodes.Items {
		if !nc.knownNodeSet.Has(node.Name) {
			glog.V(1).Infof("NodeController observed a new Node: %#v", node)
			nc.recordNodeEvent(node.Name, api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", node.Name))
			nc.cancelPodEviction(node.Name)
			nc.knownNodeSet.Insert(node.Name)
		}
	}
	// If there's a difference between lengths of known Nodes and observed nodes
	// we must have removed some Node.
	if len(nc.knownNodeSet) != len(nodes.Items) {
		observedSet := make(sets.String)
		for _, node := range nodes.Items {
			observedSet.Insert(node.Name)
		}
		deleted := nc.knownNodeSet.Difference(observedSet)
		for nodeName := range deleted {
			glog.V(1).Infof("NodeController observed a Node deletion: %v", nodeName)
			nc.recordNodeEvent(nodeName, api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", nodeName))
			nc.evictPods(nodeName)
			nc.knownNodeSet.Delete(nodeName)
		}
	}

	seenReady := false
	for i := range nodes.Items {
		var gracePeriod time.Duration
		var observedReadyCondition api.NodeCondition
		var currentReadyCondition *api.NodeCondition
		node := &nodes.Items[i]
		for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
			gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node)
			if err == nil {
				break
			}
			name := node.Name
			node, err = nc.kubeClient.Core().Nodes().Get(name)
			if err != nil {
				glog.Errorf("Failed while getting a Node to retry updating NodeStatus. Probably Node %s was deleted.", name)
				break
			}
		}
		if err != nil {
			glog.Errorf("Update status  of Node %v from NodeController exceeds retry count."+
				"Skipping - no pods will be evicted.", node.Name)
			continue
		}

		decisionTimestamp := nc.now()

		if currentReadyCondition != nil {
			// Check eviction timeout against decisionTimestamp
			if observedReadyCondition.Status == api.ConditionFalse &&
				decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
				if nc.evictPods(node.Name) {
					glog.V(4).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
				}
			}
			if observedReadyCondition.Status == api.ConditionUnknown &&
				decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) {
				if nc.evictPods(node.Name) {
					glog.V(4).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
				}
			}
			if observedReadyCondition.Status == api.ConditionTrue {
				// We do not treat a master node as a part of the cluster for network segmentation checking.
				if !system.IsMasterNode(node) {
					seenReady = true
				}
				if nc.cancelPodEviction(node.Name) {
					glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
				}
			}

			// Report node event.
			if currentReadyCondition.Status != api.ConditionTrue && observedReadyCondition.Status == api.ConditionTrue {
				nc.recordNodeStatusChange(node, "NodeNotReady")
				if err = nc.markAllPodsNotReady(node.Name); err != nil {
					utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err))
				}
			}

			// Check with the cloud provider to see if the node still exists. If it
			// doesn't, delete the node immediately.
			if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil {
				exists, err := nc.nodeExistsInCloudProvider(node.Name)
				if err != nil {
					glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
					continue
				}
				if !exists {
					glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
					nc.recordNodeEvent(node.Name, api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
					go func(nodeName string) {
						defer utilruntime.HandleCrash()
						// Kubelet is not reporting and Cloud Provider says node
						// is gone. Delete it without worrying about grace
						// periods.
						if err := nc.forcefullyDeleteNode(nodeName); err != nil {
							glog.Errorf("Unable to forcefully delete node %q: %v", nodeName, err)
						}
					}(node.Name)
					continue
				}
			}
		}
	}

	// The NodeController does not see any Ready Node. We assume that the network is segmented and
	// that Nodes cannot connect to the API server to update their statuses. The NodeController
	// enters network segmentation mode and cancels all evictions in progress.
	if !seenReady {
		nc.networkSegmentationMode = true
		nc.stopAllPodEvictions()
		glog.V(2).Info("NodeController is entering network segmentation mode.")
	} else {
		if nc.networkSegmentationMode {
			nc.forceUpdateAllProbeTimes()
			nc.networkSegmentationMode = false
			glog.V(2).Info("NodeController exited network segmentation mode.")
		}
	}
	return nil
}