Example #1
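// startPStoreLoops starts one persistent-storage writing goroutine per
// consumer builder and returns the total-count tracker for each one.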
func startPStoreLoops(
	stats *datastructs.ApplicationStatuses,
	consumerBuilders []*pstore.ConsumerWithMetricsBuilder,
	logger *log.Logger,
	maybeNilCoordBuilder coordinatorBuilderType) []*totalCountType {
	result := make([]*totalCountType, len(consumerBuilders))
	for i := range result {
		pstoreHandler := newPStoreHandler(
			stats.ApplicationList(),
			consumerBuilders[i],
			maybeNilCoordBuilder)
		result[i] = pstoreHandler.TotalCount()
		var attributes pstore.ConsumerAttributes
		pstoreHandler.Attributes(&attributes)
		refreshRate := *fPStoreUpdateFrequency
		if attributes.RollUpSpan > 0 {
			refreshRate = attributes.RollUpSpan
		}
		if err := pstoreHandler.RegisterMetrics(); err != nil {
			log.Fatal(err)
		}
		go func(handler *pstoreHandlerType, refreshRate time.Duration) {
			// Persistent storage writing goroutine. Writes every 30s by
			// default. Note that this single goroutine handles all the
			// persistent storage writing because multiple goroutines must
			// not access the pstoreHandler instance; accessing pstoreHandler
			// metrics is the one exception to this rule.
			for {
				metricStore := stats.Store()
				writeTime := time.Now()
				handler.StartVisit()
				metricStore.VisitAllEndpoints(handler)
				handler.EndVisit(metricStore)
				writeDuration := time.Since(writeTime)
				if writeDuration < refreshRate {
					time.Sleep(refreshRate - writeDuration)
				}
			}
		}(pstoreHandler, refreshRate)
	}
	return result
}
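The writer goroutine above uses a fixed-rate loop: it times each write pass, then sleeps only for whatever remains of the refresh interval. A self-contained sketch of the same pattern (the names here are illustrative, not from the original code):

package main

import (
	"fmt"
	"time"
)

// runAtFixedRate calls work repeatedly, starting a new pass every
// interval. If work itself takes longer than interval, the next pass
// starts immediately instead of sleeping.
func runAtFixedRate(interval time.Duration, work func()) {
	for {
		start := time.Now()
		work()
		if elapsed := time.Since(start); elapsed < interval {
			time.Sleep(interval - elapsed)
		}
	}
}

func main() {
	go runAtFixedRate(time.Second, func() { fmt.Println("write pass") })
	time.Sleep(3 * time.Second) // let a few passes run
}

Unlike a plain time.Sleep(interval) at the end of the loop, this keeps the pass-to-pass spacing close to interval even when each pass does real work.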
Example #2
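// startCollector registers the collector's tricorder metrics and starts
// the metric collection goroutine, which polls every active endpoint
// once per collection interval.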
func startCollector(
	appStats *datastructs.ApplicationStatuses,
	connectionErrors *connectionErrorsType,
	totalCounts totalCountUpdaterType,
	metricNameAdder suggest.Adder,
	memoryChecker memoryCheckerType) {
	collector.SetConcurrentPolls(*fPollCount)
	collector.SetConcurrentConnects(*fConnectionCount)

	sweepDurationDist := tricorder.NewGeometricBucketer(1, 100000.0).NewCumulativeDistribution()
	collectionBucketer := tricorder.NewGeometricBucketer(1e-4, 100.0)
	collectionTimesDist := collectionBucketer.NewCumulativeDistribution()
	tricorderCollectionTimesDist := collectionBucketer.NewCumulativeDistribution()
	snmpCollectionTimesDist := collectionBucketer.NewCumulativeDistribution()
	jsonCollectionTimesDist := collectionBucketer.NewCumulativeDistribution()
	changedMetricsPerEndpointDist := tricorder.NewGeometricBucketer(1.0, 10000.0).NewCumulativeDistribution()

	if err := tricorder.RegisterMetric(
		"collector/collectionTimes",
		collectionTimesDist,
		units.Second,
		"Collection Times"); err != nil {
		log.Fatal(err)
	}
	if err := tricorder.RegisterMetric(
		"collector/collectionTimes_tricorder",
		tricorderCollectionTimesDist,
		units.Second,
		"Tricorder Collection Times"); err != nil {
		log.Fatal(err)
	}
	if err := tricorder.RegisterMetric(
		"collector/collectionTimes_snmp",
		snmpCollectionTimesDist,
		units.Second,
		"SNMP Collection Times"); err != nil {
		log.Fatal(err)
	}
	if err := tricorder.RegisterMetric(
		"collector/collectionTimes_json",
		jsonCollectionTimesDist,
		units.Second,
		"JSON Collection Times"); err != nil {
		log.Fatal(err)
	}
	if err := tricorder.RegisterMetric(
		"collector/changedMetricsPerEndpoint",
		changedMetricsPerEndpointDist,
		units.None,
		"Changed metrics per sweep"); err != nil {
		log.Fatal(err)
	}
	if err := tricorder.RegisterMetric(
		"collector/sweepDuration",
		sweepDurationDist,
		units.Millisecond,
		"Sweep duration"); err != nil {
		log.Fatal(err)
	}
	programStartTime := time.Now()
	if err := tricorder.RegisterMetric(
		"collector/elapsedTime",
		func() time.Duration {
			return time.Since(programStartTime)
		},
		units.Second,
		"elapsed time"); err != nil {
		log.Fatal(err)
	}

	byProtocolDist := map[string]*tricorder.CumulativeDistribution{
		"tricorder": tricorderCollectionTimesDist,
		"snmp":      snmpCollectionTimesDist,
		"json":      jsonCollectionTimesDist,
	}

	// Metric collection goroutine. Polls every active endpoint once per
	// collection interval.
	go func() {
		// We assign each endpoint its very own nameSetType instance
		// to store metric names already sent to suggest.
		// Only that endpoint's fetch goroutine reads and modifies
		// the contents of its nameSetType instance. Although
		// this goroutine creates nameSetType instances and manages
		// the references to them, it never reads or modifies the
		// contents of any nameSetType instance after creating it.
		endpointToNamesSentToSuggest := make(
			map[*collector.Endpoint]nameSetType)
		for {
			endpoints, metricStore := appStats.ActiveEndpointIds()
			sweepTime := time.Now()
			for _, endpoint := range endpoints {
				namesSentToSuggest := endpointToNamesSentToSuggest[endpoint]
				if namesSentToSuggest == nil {
					namesSentToSuggest = make(nameSetType)
					endpointToNamesSentToSuggest[endpoint] = namesSentToSuggest
				}
				logger := &loggerType{
					Store:               metricStore,
					AppStats:            appStats,
					ConnectionErrors:    connectionErrors,
					CollectionTimesDist: collectionTimesDist,
					ByProtocolDist:      byProtocolDist,
					ChangedMetricsDist:  changedMetricsPerEndpointDist,
					NamesSentToSuggest:  namesSentToSuggest,
					MetricNameAdder:     metricNameAdder,
					TotalCounts:         totalCounts,
				}

				endpoint.Poll(sweepTime, logger)
			}
			sweepDuration := time.Since(sweepTime)
			sweepDurationDist.Add(sweepDuration)
			memoryChecker.Check()
			if sweepDuration < *fCollectionFrequency {
				time.Sleep(*fCollectionFrequency - sweepDuration)
			}
		}
	}()
}
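Both the loop above and the query function in the next example rely on the same get-or-create map idiom: look the key up, and on a miss create the value and store it before first use. A standalone sketch with illustrative names (the bool value type for nameSetType is an assumption):

package main

import "fmt"

type nameSetType map[string]bool

func main() {
	endpointToNames := make(map[string]nameSetType)
	for _, endpoint := range []string{"host1", "host2", "host1"} {
		names := endpointToNames[endpoint]
		if names == nil {
			// First sighting of this endpoint: create its name set.
			names = make(nameSetType)
			endpointToNames[endpoint] = names
		}
		names["someMetric"] = true
	}
	fmt.Println(len(endpointToNames)) // 2
}

This works because indexing a Go map with a missing key yields the value type's zero value, which for a map or interface type is nil.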
Example #3
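// query aggregates the time series named metricName across all active
// endpoints between start and end, grouping the results according to
// options. It returns ErrNoSuchMetric if no endpoint has the metric.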
func query(
	endpoints *datastructs.ApplicationStatuses,
	metricName string,
	aggregatorGen tsdb.AggregatorGenerator,
	start, end float64,
	options *QueryOptions) (result *tsdb.TaggedTimeSeriesSet, err error) {
	if options == nil {
		options = &QueryOptions{}
	}
	apps, store := endpoints.AllWithStore()
	var taggedTimeSeriesSlice []tsdb.TaggedTimeSeries
	var metricNameFound bool
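	// Grouping by both host name and app name gives every endpoint its
	// own tag set, so each matching time series is aggregated on its own.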
	if options.GroupByHostName && options.GroupByAppName {
		for i := range apps {
			if options.isIncluded(apps[i].EndpointId.HostName(), apps[i].Name) {
				timeSeries, ok := store.TsdbTimeSeries(
					metricName,
					apps[i].EndpointId,
					start,
					end)
				if ok {
					metricNameFound = true
					var aggregator tsdb.Aggregator
					aggregator, err = aggregatorGen(start, end)
					if err != nil {
						return
					}
					aggregator.Add(timeSeries)
					aggregatedTimeSeries := aggregator.Aggregate()
					if len(aggregatedTimeSeries) != 0 {
						taggedTimeSeriesSlice = append(
							taggedTimeSeriesSlice, tsdb.TaggedTimeSeries{
								Tags: tsdb.TagSet{
									HostName: apps[i].EndpointId.HostName(),
									AppName:  apps[i].Name,
								},
								Values: aggregatedTimeSeries,
							})
					}
				}
			}
		}
	} else {
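		// With coarser grouping, several endpoints may share a tag set, so
		// aggregate into a map keyed by tag set and emit one tagged time
		// series per distinct tag set.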
		aggregatorMap := make(map[tsdb.TagSet]tsdb.Aggregator)
		for i := range apps {
			if options.isIncluded(apps[i].EndpointId.HostName(), apps[i].Name) {
				timeSeries, ok := store.TsdbTimeSeries(
					metricName,
					apps[i].EndpointId,
					start,
					end)
				if ok {
					metricNameFound = true
					var tagSet tsdb.TagSet
					if options.GroupByHostName {
						tagSet.HostName = apps[i].EndpointId.HostName()
					}
					if options.GroupByAppName {
						tagSet.AppName = apps[i].Name
					}
					aggregator := aggregatorMap[tagSet]
					if aggregator == nil {
						aggregator, err = aggregatorGen(start, end)
						if err != nil {
							return
						}
						aggregatorMap[tagSet] = aggregator
					}
					aggregator.Add(timeSeries)
				}
			}
		}
		if metricNameFound {
			for k, v := range aggregatorMap {
				aggregatedTimeSeries := v.Aggregate()
				if len(aggregatedTimeSeries) != 0 {
					taggedTimeSeriesSlice = append(
						taggedTimeSeriesSlice, tsdb.TaggedTimeSeries{
							Tags:   k,
							Values: aggregatedTimeSeries,
						})
				}
			}
		}
	}
	if metricNameFound {
		return &tsdb.TaggedTimeSeriesSet{
			MetricName:        metricName,
			Data:              taggedTimeSeriesSlice,
			GroupedByHostName: options.GroupByHostName,
			GroupedByAppName:  options.GroupByAppName,
		}, nil
	}
	return nil, ErrNoSuchMetric
}
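A sketch of a call site for query from within the same package, grouping results by host name. The metric name, the endpoints value, and the aggregator generator gen are assumptions, not from the original; start and end are presumably seconds since the Unix epoch, following the usual TSDB convention:

// Hypothetical call site; endpoints and gen are assumed to exist.
result, err := query(
	endpoints,
	"/proc/cpu/total", // assumed metric name
	gen,               // a tsdb.AggregatorGenerator
	1500000000.0,
	1500003600.0,
	&QueryOptions{GroupByHostName: true})
if err == ErrNoSuchMetric {
	// No active endpoint has the metric in the requested range.
}
_ = result

Because only GroupByHostName is set, the else branch runs and all applications on the same host are merged into a single tagged time series.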