Example #1
File: kv.go Project: tomzhang/p2
// WatchPods watches the key-value store for any changes under the given key
// prefix. The resulting manifests are emitted on podChan. WatchPods does not
// return in the event of an error, but it will emit the error on errChan. To
// terminate WatchPods, close quitChan.
//
// All the values under the given key prefix must be pod manifests. Emitted
// manifests might be unchanged from the last time they were read. It is the
// caller's responsibility to filter out unchanged manifests.
func (c consulStore) WatchPods(keyPrefix string, quitChan <-chan struct{}, errChan chan<- error, podChan chan<- []ManifestResult) {
	defer close(podChan)

	kvPairsChan := make(chan api.KVPairs)
	go consulutil.WatchPrefix(keyPrefix, c.client.KV(), kvPairsChan, quitChan, errChan)
	for kvPairs := range kvPairsChan {
		manifests := make([]ManifestResult, 0, len(kvPairs))
		for _, pair := range kvPairs {
			manifest, err := pods.ManifestFromBytes(pair.Value)
			if err != nil {
				select {
				case <-quitChan:
					return
				case errChan <- util.Errorf("Could not parse pod manifest at %s: %s. Content follows: \n%s", pair.Key, err, pair.Value):
				}
			} else {
				manifests = append(manifests, ManifestResult{manifest, pair.Key})
			}
		}
		select {
		case <-quitChan:
			return
		case podChan <- manifests:
		}
	}
}
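A minimal caller sketch for this contract, with imports omitted as in the examples on this page; the key prefix, handlePods helper, and stop channel are assumptions for illustration. Closing quitChan stops the watch, after which WatchPods closes podChan:

func watchNodePods(store consulStore, stop <-chan struct{}) {
	quitChan := make(chan struct{})
	errChan := make(chan error)
	podChan := make(chan []ManifestResult)

	go store.WatchPods("nodes/node1.example.com", quitChan, errChan, podChan)

	for {
		select {
		case results, ok := <-podChan:
			if !ok {
				return // WatchPods closed podChan after quitChan was closed
			}
			// Results may repeat manifests unchanged since the last read;
			// deduplicating them is the caller's job.
			handlePods(results)
		case err := <-errChan:
			log.Printf("pod watch error: %v", err)
		case <-stop:
			close(quitChan) // terminates WatchPods
			stop = nil      // never select on the closed stop channel again
		}
	}
}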
Example #2
File: kv.go Project: drcapulet/p2
// WatchAllPods does the same thing as WatchPods, but across all nodes instead
// of a single one.
func (c consulStore) WatchAllPods(
	podPrefix PodPrefix,
	quitChan <-chan struct{},
	errChan chan<- error,
	podChan chan<- []ManifestResult,
	pauseTime time.Duration,
) {
	defer close(podChan)

	kvPairsChan := make(chan api.KVPairs)
	go consulutil.WatchPrefix(string(podPrefix), c.client.KV(), kvPairsChan, quitChan, errChan, pauseTime)
	for kvPairs := range kvPairsChan {
		manifests := make([]ManifestResult, 0, len(kvPairs))
		for _, pair := range kvPairs {
			manifestResult, err := c.manifestResultFromPair(pair)
			if err != nil {
				select {
				case <-quitChan:
					return
				case errChan <- util.Errorf("Could not parse pod manifest at %s: %s. Content follows: \n%s", pair.Key, err, pair.Value):
				}
			} else {
				manifests = append(manifests, manifestResult)
			}
		}
		select {
		case <-quitChan:
			return
		case podChan <- manifests:
		}
	}
}
Example #3
// WatchHealth watches the health tree and writes the whole subtree to the
// channel passed by the caller. The result channel argument _must be
// buffered_. Any errors are passed, best effort, over errCh.
func (c consulHealthChecker) WatchHealth(
	resultCh chan []*health.Result,
	errCh chan<- error,
	quitCh <-chan struct{},
) {
	// inCh is closed by WatchPrefix when quitCh is closed
	inCh := make(chan api.KVPairs)
	watchErrCh := make(chan error)
	go consulutil.WatchPrefix("health/", c.kv, inCh, quitCh, watchErrCh, 1*time.Second)
	publishErrCh := publishLatestHealth(inCh, quitCh, resultCh)

	for {
		select {
		case <-quitCh:
			return
		case err := <-watchErrCh:
			select {
			case errCh <- err:
			case <-quitCh:
				return
			default:
			}
		case err := <-publishErrCh:
			select {
			case errCh <- err:
			case <-quitCh:
				return
			default:
			}
		}
	}
}
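Given that errors are delivered best effort (note the default cases above) and the result channel must be buffered, a caller sketch might look like the following; the checker value, handleHealth helper, and stop channel are assumptions:

func watchHealth(checker consulHealthChecker, stop <-chan struct{}) {
	// The result channel must be buffered, per the contract above.
	resultCh := make(chan []*health.Result, 1)
	errCh := make(chan error)
	quitCh := make(chan struct{})

	go checker.WatchHealth(resultCh, errCh, quitCh)

	for {
		select {
		case results := <-resultCh:
			handleHealth(results) // hypothetical consumer
		case err := <-errCh:
			// Best effort on the producer side: errors may be dropped
			// if nobody is ready to receive them.
			log.Printf("health watch error: %v", err)
		case <-stop:
			close(quitCh)
			return
		}
	}
}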
Example #4
// Like WatchNew() but instead of returning raw fields.RC types it wraps them
// in a struct that indicates what types of locks are held on the RC. This is
// useful for reducing failed lock attempts in the RC farms, which have a
// latency cost. WatchNewWithRCLockInfo() retrieves the contents of the entire
// replication controller lock tree once per update, so farms can attempt to
// acquire only those locks that were not held at some recent time.
//
// Since lock information is retrieved once per update to the RC list, it's
// possible that lock information will be out of date by the time the list is
// processed. However, a subsequent update will get the correct view of the
// world, so the behavior should be correct.
func (s *consulStore) WatchNewWithRCLockInfo(quit <-chan struct{}) (<-chan []RCLockResult, <-chan error) {
	inCh := make(chan api.KVPairs)
	combinedErrCh := make(chan error)

	rcCh, rcErrCh := publishLatestRCs(inCh, quit)
	go consulutil.WatchPrefix(rcTree+"/", s.kv, inCh, quit, rcErrCh, 1*time.Second)

	// Process RC updates and augment them with lock information
	outCh, lockInfoErrCh := s.publishLatestRCsWithLockInfo(rcCh, quit)

	// Fan-in the two error channels into one source
	go func() {
		defer close(combinedErrCh)
		var err error
		for {
			select {
			case err = <-lockInfoErrCh:
			case err = <-rcErrCh:
			case <-quit:
				return
			}

			select {
			case combinedErrCh <- err:
			case <-quit:
				return
			}
		}
	}()
	return outCh, combinedErrCh
}
Example #5
func (s *consulStore) WatchNew(quit <-chan struct{}) (<-chan []fields.RC, <-chan error) {
	outCh := make(chan []fields.RC)
	errCh := make(chan error)
	inCh := make(chan api.KVPairs)

	go consulutil.WatchPrefix(kp.RC_TREE, s.kv, inCh, quit, errCh)

	go func() {
		defer close(outCh)
		defer close(errCh)

		for listed := range inCh {
			out, err := s.kvpsToRCs(listed)
			if err != nil {
				select {
				case errCh <- err:
				case <-quit:
				}
			} else {
				select {
				case outCh <- out:
				case <-quit:
				}
			}
		}
	}()

	return outCh, errCh
}
Example #6
// Watches the consul store for changes to the RC tree and attempts to return
// the full RC list on each update.
//
// Because processing the full list of RCs may take a large amount of time,
// particularly when there are hundreds of RCs, WatchNew() takes care to drop
// writes to the output channel if they're being consumed too slowly. It will
// block writing a value to the output channel until 1) it is read or 2) a new
// value comes in, in which case the newer value will be written instead.
func (s *consulStore) WatchNew(quit <-chan struct{}) (<-chan []fields.RC, <-chan error) {
	inCh := make(chan api.KVPairs)

	outCh, errCh := publishLatestRCs(inCh, quit)
	go consulutil.WatchPrefix(rcTree+"/", s.kv, inCh, quit, errCh, 1*time.Second)

	return outCh, errCh
}
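publishLatestRCs itself is not shown on this page; a minimal sketch of the drop-stale-writes pattern the comment describes, with assumed names and the element type left as api.KVPairs, could look like this:

// Sketch of a latest-value publisher: it blocks sending the current value
// until it is consumed, but replaces that value whenever a newer one arrives.
func publishLatest(in <-chan api.KVPairs, quit <-chan struct{}) <-chan api.KVPairs {
	out := make(chan api.KVPairs)
	go func() {
		defer close(out)
		var latest api.KVPairs
		var sendCh chan api.KVPairs // nil disables the send case below
		for {
			select {
			case <-quit:
				return
			case pairs, ok := <-in:
				if !ok {
					return
				}
				latest = pairs // overwrite any value the consumer hasn't read
				sendCh = out   // enable the send case
			case sendCh <- latest:
				sendCh = nil // disable sends until a fresh value arrives
			}
		}
	}()
	return out
}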
Example #7
func (s consulStore) Watch(quit <-chan struct{}) (<-chan []roll_fields.Update, <-chan error) {
	inCh := make(chan api.KVPairs)

	outCh, errCh := publishLatestRolls(inCh, quit)
	go consulutil.WatchPrefix(rollTree+"/", s.kv, inCh, quit, errCh, 0)

	return outCh, errCh
}
Example #8
// Watch watches the entire podClusterTree for changes.
// It will return a blocking channel on which the client can read
// WatchedPodClusters values. The goroutine maintaining the watch will block on
// writing to this channel, so it's up to the caller to read it with haste.
func (s *consulStore) Watch(quit <-chan struct{}) <-chan WatchedPodClusters {
	inCh := make(chan api.KVPairs)
	outCh := make(chan WatchedPodClusters)
	errChan := make(chan error, 1)

	go consulutil.WatchPrefix(podClusterTree, s.kv, inCh, quit, errChan, 5*time.Second)

	go func() {
		var kvp api.KVPairs
		for {
			select {
			case <-quit:
				return
			case err := <-errChan:
				s.logger.WithError(err).Errorf("WatchPrefix returned an error, recovered.")
			case kvp = <-inCh:
				if kvp == nil {
					// nothing to do
					continue
				}
			}

			clusters := WatchedPodClusters{}

			pcs, err := kvpsToPC(kvp)
			if err != nil {
				clusters.Err = err
				select {
				case <-quit:
					return
				case outCh <- clusters:
					continue
				}
			}

			for _, pc := range pcs {
				// We can't just use &pc because that would be a pointer to
				// the iteration variable
				pcPtr := pc
				clusters.Clusters = append(clusters.Clusters, &pcPtr)
			}

			select {
			case outCh <- clusters:
			case <-quit:
				return
			}
		}
	}()

	return outCh
}
Example #9
// WatchAll watches dsTree for all the daemon sets and returns a blocking
// channel where the client can read a WatchedDaemonSetList object which
// contains all of the daemon sets currently in the tree.
func (s *consulStore) WatchAll(quitCh <-chan struct{}, pauseTime time.Duration) <-chan WatchedDaemonSetList {
	inCh := make(chan api.KVPairs)
	outCh := make(chan WatchedDaemonSetList)
	errCh := make(chan error, 1)

	// Watch for changes in the dsTree
	go consulutil.WatchPrefix(dsTree, s.kv, inCh, quitCh, errCh, pauseTime)

	go func() {
		defer close(outCh)
		defer close(errCh)

		var kvp api.KVPairs
		for {
			select {
			case <-quitCh:
				return
			case err := <-errCh:
				s.logger.WithError(err).Errorf("WatchPrefix returned an error, recovered.")
			case kvp = <-inCh:
				if kvp == nil {
					// nothing to do
					continue
				}
			}

			daemonSets := WatchedDaemonSetList{}

			dsList, err := kvpsToDSs(kvp)
			if err != nil {
				daemonSets.Err = err
				select {
				case <-quitCh:
					return
				case outCh <- daemonSets:
					continue
				}
			}

			daemonSets.DaemonSets = dsList

			select {
			case outCh <- daemonSets:
			case <-quitCh:
				return
			}
		}
	}()

	return outCh
}
Example #10
File: kv.go Project: drcapulet/p2
// WatchPods watches the key-value store for any changes to pods for a given
// host under a given tree.  The resulting manifests are emitted on podChan.
// WatchPods does not return in the event of an error, but it will emit the
// error on errChan. To terminate WatchPods, close quitChan.
//
// All the values under the given path must be pod manifests. Emitted
// manifests might be unchanged from the last time they were read. It is the
// caller's responsibility to filter out unchanged manifests.
func (c consulStore) WatchPods(
	podPrefix PodPrefix,
	nodename types.NodeName,
	quitChan <-chan struct{},
	errChan chan<- error,
	podChan chan<- []ManifestResult,
) {
	defer close(podChan)

	keyPrefix, err := nodePath(podPrefix, nodename)
	if err != nil {
		select {
		case <-quitChan:
		case errChan <- err:
		}
		return
	}

	kvPairsChan := make(chan api.KVPairs)
	go consulutil.WatchPrefix(keyPrefix, c.client.KV(), kvPairsChan, quitChan, errChan, 0)
	for kvPairs := range kvPairsChan {
		manifests := make([]ManifestResult, 0, len(kvPairs))
		for _, pair := range kvPairs {
			manifestResult, err := c.manifestResultFromPair(pair)
			if err != nil {
				select {
				case <-quitChan:
					return
				case errChan <- util.Errorf("Could not parse pod manifest at %s: %s. Content follows: \n%s", pair.Key, err, pair.Value):
				}
			} else {
				manifests = append(manifests, manifestResult)
			}
		}
		select {
		case <-quitChan:
			return
		case podChan <- manifests:
		}
	}
}
Example #11
// Aggregate does the labor of querying Consul for all labels under a given
// type, applying each watcher's label selector to the results, and sending
// the matches on each watcher's respective output channel.
// Aggregate will loop forever, constantly sending matches to each watcher
// until Quit() has been invoked.
func (c *consulAggregator) Aggregate() {
	outPairs := make(chan api.KVPairs)
	done := make(chan struct{})
	outErrors := make(chan error)
	go consulutil.WatchPrefix(c.path+"/", c.kv, outPairs, done, outErrors, 0)
	for {
		missedSends := 0
		loopTime := time.After(c.aggregationRate)
		select {
		case err := <-outErrors:
			c.logger.WithError(err).Errorln("Error during watch")
		case <-c.aggregatorQuit:
			return
		case pairs := <-outPairs:
			c.watcherLock.Lock()

			// replace our current cache with the latest contents of the label tree.
			c.fillCache(pairs)

			// Iterate over each watcher and send the []Labeled
			// that match the watcher's selector to the watcher's out channel.
			watcher := c.watchers
			for watcher != nil {
				if !c.sendMatches(watcher) {
					missedSends++
				}
				watcher = watcher.next
			}
			c.watcherLock.Unlock()
			c.metWatchSendMiss.Update(int64(missedSends))
		}
		select {
		case <-c.aggregatorQuit:
			return
		case <-loopTime:
			// we purposely don't select on outErrors here, since loopTime
			// lets us back off of Consul watches. If errors were occurring
			// repeatedly, we could end up in a nasty busy loop.
		}
	}
}
Example #12
File: store.go Project: tomzhang/p2
func (s consulStore) Watch(quit <-chan struct{}) (<-chan []rollf.Update, <-chan error) {
	outCh := make(chan []rollf.Update)
	errCh := make(chan error)
	inCh := make(chan api.KVPairs)

	go consulutil.WatchPrefix(kp.ROLL_TREE, s.kv, inCh, quit, errCh)

	go func() {
		defer close(outCh)
		defer close(errCh)

		for listed := range inCh {
			out := make([]rollf.Update, 0, len(listed))
		kvpLoop:
			for _, kvp := range listed {
				var next rollf.Update
				if err := json.Unmarshal(kvp.Value, &next); err != nil {
					select {
					case errCh <- err:
					case <-quit:
					// stop processing this kvp list; inCh should be closed
					// in a moment (a bare break would only exit the select,
					// so break out of the loop by label)
					break kvpLoop
					}
				} else {
					out = append(out, next)
				}
			}
			select {
			case outCh <- out:
			case <-quit:
			}
		}
	}()

	return outCh, errCh
}