func createApplicationStats(
    appList *datastructs.ApplicationList,
    logger *log.Logger,
    tagvAdder suggest.Adder,
    maybeNilMemoryManager *memoryManagerType) *datastructs.ApplicationStatuses {
    var astore *store.Store
    fmt.Println("Initialization started.")
    if maybeNilMemoryManager != nil {
        memoryManager := maybeNilMemoryManager
        astore = store.NewStoreBytesPerPage(
            *fBytesPerPage, 1, *fThreshhold, *fDegree)
        astore.SetExpanding(true)
        memoryManager.SetMemory(astore)
        if err := memoryManager.RegisterMetrics(); err != nil {
            log.Fatal(err)
        }
    } else {
        astore = store.NewStoreBytesPerPage(
            *fBytesPerPage, *fPageCount, *fThreshhold, *fDegree)
    }
    dirSpec, err := tricorder.RegisterDirectory("/store")
    if err != nil {
        log.Fatal(err)
    }
    if err := astore.RegisterMetrics(dirSpec); err != nil {
        log.Fatal(err)
    }
    stats := datastructs.NewApplicationStatuses(appList, astore)
    mdbChannel := mdbd.StartMdbDaemon(*fMdbFile, logger)
    machines := <-mdbChannel
    theHostNames := hostNames(machines.Machines)
    for _, aName := range theHostNames {
        tagvAdder.Add(aName)
    }
    stats.MarkHostsActiveExclusively(
        duration.TimeToFloat(time.Now()), theHostNames)
    fmt.Println("Initialization complete.")
    // Endpoint refresher goroutine: apply every subsequent mdb update.
    go func() {
        for {
            machines := <-mdbChannel
            stats.MarkHostsActiveExclusively(
                duration.TimeToFloat(time.Now()),
                hostNames(machines.Machines))
        }
    }()
    return stats
}
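// createApplicationStats follows a common pattern: read one update from the
// mdb channel synchronously to finish initialization, then hand the channel
// off to a goroutine that applies every later update. Below is a minimal,
// self-contained sketch of that pattern; the update type and functions here
// are hypothetical stand-ins, not scotty APIs.
package main

import "fmt"

type update struct{ hosts []string }

func markActive(hosts []string) { fmt.Println("active:", hosts) }

func main() {
    updates := make(chan update)
    go func() { // stand-in for the daemon feeding mdbChannel
        updates <- update{hosts: []string{"host1"}}
        updates <- update{hosts: []string{"host1", "host2"}}
        close(updates)
    }()
    first := <-updates // block until the first snapshot arrives
    markActive(first.hosts)
    done := make(chan struct{})
    go func() { // refresher: apply each subsequent snapshot
        for u := range updates {
            markActive(u.hosts)
        }
        close(done)
    }()
    <-done
}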
// ToFloat converts a value of this type to a float64.
// ToFloat panics if this type doesn't support conversion to float64.
func (t Type) ToFloat(x interface{}) float64 {
    switch t {
    case Int8:
        return float64(x.(int8))
    case Int16:
        return float64(x.(int16))
    case Int32:
        return float64(x.(int32))
    case Int64:
        return float64(x.(int64))
    case Uint8:
        return float64(x.(uint8))
    case Uint16:
        return float64(x.(uint16))
    case Uint32:
        return float64(x.(uint32))
    case Uint64:
        return float64(x.(uint64))
    case Float32:
        return float64(x.(float32))
    case Float64:
        return x.(float64)
    case GoTime:
        return duration.TimeToFloat(x.(time.Time))
    case GoDuration:
        return duration.ToFloat(x.(time.Duration))
    default:
        panic("Type doesn't support conversion to float")
    }
}
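// A self-contained sketch of the conversions the GoTime and GoDuration cases
// rely on, assuming duration.TimeToFloat yields seconds since the Unix epoch
// and duration.ToFloat yields seconds, which matches how those helpers are
// used throughout this code. timeToFloat and durToFloat are hypothetical
// local stand-ins, not the duration package itself.
package main

import (
    "fmt"
    "time"
)

// timeToFloat mirrors the assumed duration.TimeToFloat behavior:
// seconds since the Unix epoch as a float64.
func timeToFloat(t time.Time) float64 {
    return float64(t.UnixNano()) / float64(time.Second)
}

// durToFloat mirrors the assumed duration.ToFloat behavior:
// the duration expressed in seconds.
func durToFloat(d time.Duration) float64 {
    return d.Seconds()
}

func main() {
    fmt.Println(timeToFloat(time.Unix(1500000000, 500000000))) // 1.5000000005e+09
    fmt.Println(durToFloat(90 * time.Second))                  // 90
}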
func newStoreMetricsType() *storeMetricsType {
    return &storeMetricsType{
        PagesPerMetricDist: kBucketer.NewNonCumulativeDistribution(),
        values: storePrimitiveMetricsType{
            LatestEvictedTimeStamp: duration.TimeToFloat(time.Now()),
        },
    }
}
func (n *namedIteratorSameValueType) Next(r *store.Record) bool {
    if n.numWritten == n.numToWrite {
        return false
    }
    r.Info = n.info
    r.Value = n.value
    r.TimeStamp = duration.TimeToFloat(kNow)
    r.Active = true
    n.numWritten++
    return true
}
func (n *namedIteratorForTestingType) Next(r *store.Record) bool {
    if n.numWritten == n.numToWrite {
        return false
    }
    r.Info = kInt64MetricInfo
    r.Value = int64(n.numWritten)
    r.TimeStamp = duration.TimeToFloat(kNow)
    r.Active = true
    n.numWritten++
    return true
}
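// Both test iterators above implement the same protocol: Next fills in the
// caller's record and returns false once exhausted. Below is a minimal,
// self-contained drain loop over that protocol; the record and iterator
// types here are hypothetical stand-ins for the store package's.
package main

import "fmt"

type record struct {
    Value     int64
    TimeStamp float64
    Active    bool
}

type iterator interface {
    Next(r *record) bool
}

type countingIterator struct{ written, total int64 }

func (c *countingIterator) Next(r *record) bool {
    if c.written == c.total {
        return false
    }
    r.Value = c.written
    r.Active = true
    c.written++
    return true
}

func main() {
    it := &countingIterator{total: 3}
    var r record
    for it.Next(&r) { // reuse one record; Next overwrites its fields
        fmt.Println(r.Value, r.Active)
    }
}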
func (l *loggerType) LogResponse(
    e *collector.Endpoint,
    list metrics.List,
    timestamp time.Time) error {
    ts := duration.TimeToFloat(timestamp)
    added, err := l.Store.AddBatch(e, ts, list)
    if err == nil {
        l.reportNewNamesForSuggest(list)
        l.AppStats.LogChangedMetricCount(e, added)
        l.ChangedMetricsDist.Add(float64(added))
        l.TotalCounts.Update(l.Store, e)
    }
    // This error just means that the endpoint was marked inactive
    // during polling.
    if err == store.ErrInactive {
        return nil
    }
    return err
}
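// LogResponse deliberately swallows one sentinel error, store.ErrInactive,
// because an endpoint going inactive mid-poll is expected rather than a
// failure. A self-contained sketch of that pattern with a hypothetical
// sentinel; errors.Is is the modern spelling of the == comparison above.
package main

import (
    "errors"
    "fmt"
)

var errInactive = errors.New("endpoint inactive") // hypothetical sentinel

func addBatch(active bool) (int, error) {
    if !active {
        return 0, errInactive
    }
    return 42, nil
}

func logResponse(active bool) error {
    added, err := addBatch(active)
    if err == nil {
        fmt.Println("recorded", added, "changed metrics")
    }
    if errors.Is(err, errInactive) {
        return nil // expected condition, not a failure
    }
    return err
}

func main() {
    fmt.Println(logResponse(true))  // <nil>
    fmt.Println(logResponse(false)) // <nil>: sentinel swallowed
}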
func asFloat64(r *pstore.Record) float64 {
    switch r.Kind {
    case types.Bool:
        if r.Value.(bool) {
            return 1.0
        }
        return 0.0
    case types.Int8:
        return float64(r.Value.(int8))
    case types.Int16:
        return float64(r.Value.(int16))
    case types.Int32:
        return float64(r.Value.(int32))
    case types.Int64:
        return float64(r.Value.(int64))
    case types.Uint8:
        return float64(r.Value.(uint8))
    case types.Uint16:
        return float64(r.Value.(uint16))
    case types.Uint32:
        return float64(r.Value.(uint32))
    case types.Uint64:
        return float64(r.Value.(uint64))
    case types.Float32:
        return float64(r.Value.(float32))
    case types.Float64:
        return r.Value.(float64)
    case types.GoTime:
        return duration.TimeToFloat(r.Value.(time.Time))
    case types.GoDuration:
        return duration.ToFloat(
            r.Value.(time.Duration)) * units.FromSeconds(r.Unit)
    default:
        panic("Unsupported type")
    }
}
// gatherDataForEndpoint serves api/hosts pages.
// metricStore is the metric store.
// endpoint is the endpoint from which we are getting historical metrics.
// canonicalPath is the path of the metrics or the empty string for all
// metrics. canonicalPath is returned from canonicalisePath().
// history is the amount of time to go back in minutes.
// If isSingleton is true, fetched metrics have to match canonicalPath
// exactly.
// Otherwise fetched metrics have to be found underneath canonicalPath.
// On no match, gatherDataForEndpoint returns an empty
// messages.EndpointMetricList instance.
func gatherDataForEndpoint(
    metricStore *store.Store,
    endpoint *collector.Endpoint,
    canonicalPath string,
    history int,
    isSingleton bool) (result messages.EndpointMetricList) {
    result = make(messages.EndpointMetricList, 0)
    now := duration.TimeToFloat(time.Now())
    appender := newEndpointMetricsAppender(&result)
    if canonicalPath == "" {
        metricStore.ByEndpointStrategy(
            endpoint,
            now-60.0*float64(history),
            math.Inf(1),
            store.GroupMetricByKey,
            appender)
    } else {
        metricStore.ByNameAndEndpointStrategy(
            canonicalPath,
            endpoint,
            now-60.0*float64(history),
            math.Inf(1),
            store.GroupMetricByKey,
            appender)
        if len(result) == 0 && !isSingleton {
            metricStore.ByPrefixAndEndpointStrategy(
                canonicalPath+"/",
                endpoint,
                now-60.0*float64(history),
                math.Inf(1),
                store.GroupMetricByKey,
                appender)
        }
    }
    sortMetricsByPath(result)
    return
}
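// The query window above is [now - 60*history, +Inf) in floating-point
// seconds: history is in minutes, timestamps are seconds since the epoch,
// and math.Inf(1) leaves the window open-ended on the right. A
// self-contained check of that arithmetic; window is a hypothetical helper.
package main

import (
    "fmt"
    "math"
    "time"
)

func window(history int) (start, end float64) {
    now := float64(time.Now().UnixNano()) / float64(time.Second)
    return now - 60.0*float64(history), math.Inf(1)
}

func main() {
    start, end := window(30) // last 30 minutes
    fmt.Printf("start=%.0f end=%v\n", start, end) // end=+Inf
}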
// LookupBatch looks up all the metrics in one go and returns the
// following:
// fetched: timeSeries already in this collection keyed by Metric.
//     Values must be added to these manually.
// newOnes: timeSeries just added as a result of this lookup. Since these
//     are new, the first value is added automatically.
// notFetched: timeSeries in this collection but not fetched. These
//     are the time series that should be marked inactive.
// fetchedTimeStamps: timestamp series already in this collection keyed by
//     the timestamp for their group ID.
// newTs: timestamp series just added as a result of this lookup.
// notFetchedTimeStamps: timestamp series in this collection whose group ID
//     was not fetched.
// err is non-nil if mlist fails verification, or ErrInactive if this
// instance is inactive and closed for new metrics.
func (c *timeSeriesCollectionType) LookupBatch(
    timestamp float64, mlist metrics.List) (
    fetched map[*timeSeriesType]interface{},
    newOnes, notFetched []*timeSeriesType,
    fetchedTimeStamps map[*timestampSeriesType]float64,
    newTs []*timestampSeriesType,
    notFetchedTimeStamps []*timestampSeriesType,
    err error) {
    if err = metrics.VerifyList(mlist); err != nil {
        return
    }
    c.lock.Lock()
    defer c.lock.Unlock()
    if !c.active {
        err = ErrInactive
        return
    }
    valueByMetric := make(map[*MetricInfo]interface{})
    timestampByGroupId := make(map[int]float64)
    groupIds := make(map[int]bool)
    fetched = make(map[*timeSeriesType]interface{})
    fetchedTimeStamps = make(map[*timestampSeriesType]float64)
    mlen := mlist.Len()
    for i := 0; i < mlen; i++ {
        var avalue metrics.Value
        mlist.Index(i, &avalue)
        kind, subType := types.FromGoValueWithSubType(avalue.Value)
        id := c.metricInfoStore.Register(&avalue, kind, subType)
        if kind == types.Dist {
            distributionRollOvers := c.distributionRollOversByPath[avalue.Path]
            if distributionRollOvers == nil {
                distributionRollOvers = &distributionRollOverType{}
                c.distributionRollOversByPath[avalue.Path] = distributionRollOvers
            }
            distribution := avalue.Value.(*messages.Distribution)
            rollOverCount := distributionRollOvers.UpdateAndFetchRollOverCount(
                id.Ranges(), distribution.Generation)
            currentDistributionTotals := &DistributionTotals{
                Counts:        distExtractCounts(distribution),
                Sum:           distribution.Sum,
                RollOverCount: rollOverCount,
            }
            valueByMetric[id] = currentDistributionTotals
        } else {
            valueByMetric[id] = avalue.Value
        }
        groupIds[id.GroupId()] = true
        if !avalue.TimeStamp.IsZero() {
            timestampByGroupId[id.GroupId()] =
                duration.TimeToFloat(avalue.TimeStamp)
        }
    }
    // If a group ID is missing a timestamp, give it the
    // timestamp from scotty.
    for groupId := range groupIds {
        if _, ok := timestampByGroupId[groupId]; !ok {
            timestampByGroupId[groupId] = timestamp
        }
    }
    // populate notFetched
    for id, series := range c.timeSeries {
        if _, ok := valueByMetric[id]; !ok {
            notFetched = append(notFetched, series)
        }
    }
    // populate fetched and newOnes
    for id, value := range valueByMetric {
        if c.timeSeries[id] == nil {
            thisTs := timestampByGroupId[id.GroupId()]
            c.timeSeries[id] = newTimeSeriesType(
                id, thisTs, value, c.metrics)
            newOnes = append(newOnes, c.timeSeries[id])
        } else {
            fetched[c.timeSeries[id]] = value
        }
    }
    // populate notFetchedTimeStamps
    for groupId, series := range c.timestampSeries {
        if _, ok := timestampByGroupId[groupId]; !ok {
            notFetchedTimeStamps = append(
                notFetchedTimeStamps, series)
        }
    }
    // populate fetchedTimeStamps and newTs
    for groupId, ts := range timestampByGroupId {
        if c.timestampSeries[groupId] == nil {
            c.timestampSeries[groupId] = newTimeStampSeriesType(
                groupId, ts, c.metrics)
            newTs = append(newTs, c.timestampSeries[groupId])
        } else {
            fetchedTimeStamps[c.timestampSeries[groupId]] = ts
        }
    }
    return
}
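// The heart of LookupBatch is a three-way partition: diff the incoming batch
// against the existing map to get fetched (in both), newOnes (only in the
// batch), and notFetched (only in the map). A self-contained sketch of that
// partition using string keys in place of *MetricInfo; all names here are
// hypothetical.
package main

import "fmt"

func partition(existing map[string]bool, batch map[string]int) (
    fetched map[string]int, newOnes, notFetched []string) {
    fetched = make(map[string]int)
    for key := range existing {
        if _, ok := batch[key]; !ok {
            notFetched = append(notFetched, key) // should be marked inactive
        }
    }
    for key, value := range batch {
        if existing[key] {
            fetched[key] = value // value must be added manually
        } else {
            newOnes = append(newOnes, key) // first value added automatically
        }
    }
    return
}

func main() {
    existing := map[string]bool{"/cpu": true, "/mem": true}
    batch := map[string]int{"/cpu": 7, "/disk": 3}
    fetched, newOnes, notFetched := partition(existing, batch)
    fmt.Println(fetched, newOnes, notFetched)
    // map[/cpu:7] [/disk] [/mem]
}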
// TODO: Remove once we know the grafana bug involving duplicate timestamps
// is fixed.
func newPathAndMillisType(record *pstore.Record) pathAndMillisType {
    return pathAndMillisType{
        Path:   record.Path,
        Millis: int64(duration.TimeToFloat(record.Timestamp) * 1000.0),
    }
}
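// A (path, millis) struct like the one above is comparable, so it can serve
// directly as a map key to drop records that collide at millisecond
// resolution. A self-contained sketch of that dedup, assuming this is how
// the workaround uses the type; the names below are hypothetical stand-ins.
package main

import (
    "fmt"
    "time"
)

type pathAndMillis struct {
    Path   string
    Millis int64
}

func key(path string, ts time.Time) pathAndMillis {
    secs := float64(ts.UnixNano()) / float64(time.Second)
    return pathAndMillis{Path: path, Millis: int64(secs * 1000.0)}
}

func main() {
    seen := make(map[pathAndMillis]bool)
    t := time.Unix(1500000000, 0)
    for _, ts := range []time.Time{t, t.Add(time.Microsecond), t.Add(time.Second)} {
        k := key("/cpu", ts)
        if seen[k] {
            fmt.Println("dropped duplicate:", k) // same millisecond as t
            continue
        }
        seen[k] = true
        fmt.Println("kept:", k)
    }
}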