func viewAdapterForRangeQuery(node Node, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (*viewAdapter, error) { analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start() analyzer := NewQueryAnalyzer(storage) analyzer.AnalyzeQueries(node) analyzeTimer.Stop() requestBuildTimer := queryStats.GetTimer(stats.ViewRequestBuildTime).Start() viewBuilder := storage.NewViewRequestBuilder() for fingerprint, rangeDuration := range analyzer.FullRanges { if interval < rangeDuration { viewBuilder.GetMetricRange(&fingerprint, start.Add(-rangeDuration), end) } else { viewBuilder.GetMetricRangeAtInterval(&fingerprint, start.Add(-rangeDuration), end, interval, rangeDuration) } } for fingerprint := range analyzer.IntervalRanges { viewBuilder.GetMetricAtInterval(&fingerprint, start, end, interval) } requestBuildTimer.Stop() buildTimer := queryStats.GetTimer(stats.InnerViewBuildingTime).Start() view, err := viewBuilder.Execute(time.Duration(60)*time.Second, queryStats) buildTimer.Stop() if err != nil { return nil, err } return NewViewAdapter(view, storage, queryStats), nil }
func (t *TieredStorage) seriesTooOld(f *clientmodel.Fingerprint, i clientmodel.Timestamp) (bool, error) { // BUG(julius): Make this configurable by query layer. i = i.Add(-stalenessLimit) wm, cacheHit, _ := t.wmCache.Get(f) if !cacheHit { if t.memoryArena.HasFingerprint(f) { samples := t.memoryArena.CloneSamples(f) if len(samples) > 0 { newest := samples[len(samples)-1].Timestamp t.wmCache.Put(f, &watermarks{High: newest}) return newest.Before(i), nil } } highTime, diskHit, err := t.DiskStorage.MetricHighWatermarks.Get(f) if err != nil { return false, err } if diskHit { t.wmCache.Put(f, &watermarks{High: highTime}) return highTime.Before(i), nil } t.wmCache.Put(f, &watermarks{}) return true, nil } return wm.High.Before(i), nil }
func (s *memorySeriesStorage) preloadChunksForRange( fp clientmodel.Fingerprint, from clientmodel.Timestamp, through clientmodel.Timestamp, stalenessDelta time.Duration, ) ([]*chunkDesc, error) { s.fpLocker.Lock(fp) defer s.fpLocker.Unlock(fp) series, ok := s.fpToSeries.get(fp) if !ok { has, first, last, err := s.persistence.hasArchivedMetric(fp) if err != nil { return nil, err } if !has { s.invalidPreloadRequestsCount.Inc() return nil, nil } if from.Add(-stalenessDelta).Before(last) && through.Add(stalenessDelta).After(first) { metric, err := s.persistence.getArchivedMetric(fp) if err != nil { return nil, err } series = s.getOrCreateSeries(fp, metric) } else { return nil, nil } } return series.preloadChunksForRange(from, through, fp, s) }
func prepareInstantQuery(node Node, timestamp clientmodel.Timestamp, storage local.Storage, queryStats *stats.TimerGroup) (local.Preloader, error) { analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start() analyzer := NewQueryAnalyzer(storage) Walk(analyzer, node) analyzeTimer.Stop() // TODO: Preloading should time out after a given duration. preloadTimer := queryStats.GetTimer(stats.PreloadTime).Start() p := storage.NewPreloader() for fp, rangeDuration := range analyzer.FullRanges { if err := p.PreloadRange(fp, timestamp.Add(-rangeDuration), timestamp, *stalenessDelta); err != nil { p.Close() return nil, err } } for fp := range analyzer.IntervalRanges { if err := p.PreloadRange(fp, timestamp, timestamp, *stalenessDelta); err != nil { p.Close() return nil, err } } preloadTimer.Stop() ii := &iteratorInitializer{ storage: storage, } Walk(ii, node) return p, nil }
// EvalBoundaries implements the MatrixNode interface and returns the // boundary values of the selector. func (node *MatrixSelector) EvalBoundaries(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix { interval := &metric.Interval{ OldestInclusive: timestamp.Add(-node.interval), NewestInclusive: timestamp, } values, err := view.GetBoundaryValues(node.fingerprints, interval) if err != nil { glog.Error("Unable to get boundary values for vector interval: ", err) return Matrix{} } return values }
func buildValues(firstValue clientmodel.SampleValue, from, to clientmodel.Timestamp, interval time.Duration) (v metric.Values) { for from.Before(to) { v = append(v, metric.SamplePair{ Value: firstValue, Timestamp: from, }) from = from.Add(interval) firstValue++ } return }
func buildSamples(from, to clientmodel.Timestamp, interval time.Duration, m clientmodel.Metric) (v clientmodel.Samples) { i := clientmodel.SampleValue(0) for from.Before(to) { v = append(v, &clientmodel.Sample{ Metric: m, Value: i, Timestamp: from, }) from = from.Add(interval) i++ } return }
func (l *valueAtIntervalAlongRangeList) Get(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval, rangeDuration time.Duration) *getValueRangeAtIntervalOp { var op *getValueRangeAtIntervalOp v, ok := l.l.Get() if ok { op = v.(*getValueRangeAtIntervalOp) } else { op = &getValueRangeAtIntervalOp{} } op.fp = *fp op.current = from op.rangeThrough = from.Add(rangeDuration) op.rangeDuration = rangeDuration op.interval = interval op.through = through return op }
func generateTestSamples(endTime clientmodel.Timestamp, numTs int, samplesPerTs int, interval time.Duration) clientmodel.Samples { samples := make(clientmodel.Samples, 0, numTs*samplesPerTs) startTime := endTime.Add(-interval * time.Duration(samplesPerTs-1)) for ts := 0; ts < numTs; ts++ { metric := clientmodel.Metric{} metric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(fmt.Sprintf("metric_%d", ts)) for i := 0; i < samplesPerTs; i++ { sample := &clientmodel.Sample{ Metric: metric, Value: clientmodel.SampleValue(ts + 1000*i), Timestamp: startTime.Add(interval * time.Duration(i)), } samples = append(samples, sample) } } sort.Sort(samples) return samples }
func prepareRangeQuery(node Node, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage local.Storage, queryStats *stats.TimerGroup) (local.Preloader, error) { analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start() analyzer := NewQueryAnalyzer(storage) Walk(analyzer, node) analyzeTimer.Stop() // TODO: Preloading should time out after a given duration. preloadTimer := queryStats.GetTimer(stats.PreloadTime).Start() p := storage.NewPreloader() for fp, rangeDuration := range analyzer.FullRanges { if err := p.PreloadRange(fp, start.Add(-rangeDuration), end, *stalenessDelta); err != nil { p.Close() return nil, err } /* if interval < rangeDuration { if err := p.GetMetricRange(fp, end, end.Sub(start)+rangeDuration); err != nil { p.Close() return nil, err } } else { if err := p.GetMetricRangeAtInterval(fp, start, end, interval, rangeDuration); err != nil { p.Close() return nil, err } } */ } for fp := range analyzer.IntervalRanges { if err := p.PreloadRange(fp, start, end, *stalenessDelta); err != nil { p.Close() return nil, err } } preloadTimer.Stop() ii := &iteratorInitializer{ storage: storage, } Walk(ii, node) return p, nil }
// EvalBoundaries implements the MatrixNode interface and returns the // boundary values of the selector. func (node *MatrixSelector) EvalBoundaries(timestamp clientmodel.Timestamp) Matrix { interval := &metric.Interval{ OldestInclusive: timestamp.Add(-node.interval), NewestInclusive: timestamp, } //// timer := v.stats.GetTimer(stats.GetBoundaryValuesTime).Start() sampleStreams := []SampleStream{} for fp, it := range node.iterators { samplePairs := it.GetBoundaryValues(*interval) if len(samplePairs) == 0 { continue } sampleStream := SampleStream{ Metric: node.metrics[fp], Values: samplePairs, } sampleStreams = append(sampleStreams, sampleStream) } //// timer.Stop() return sampleStreams }
// Run facilitates the curation lifecycle.
//
// recencyThreshold represents the most recent time up to which values will be
// curated.
// curationState is the on-disk store where the curation remarks are made for
// how much progress has been made.
func (c *Curator) Run(ignoreYoungerThan time.Duration, instant clientmodel.Timestamp, processor Processor, curationState CurationRemarker, samples *leveldb.LevelDBPersistence, watermarks HighWatermarker, status CurationStateUpdater) (err error) {
	// Record total curation duration, labeled with the processor name, the
	// cut-off, and success/failure (read from the named return err on exit).
	defer func(t time.Time) {
		duration := float64(time.Since(t) / time.Millisecond)

		labels := map[string]string{
			cutOff:        fmt.Sprint(ignoreYoungerThan),
			processorName: processor.Name(),
			result:        success,
		}
		if err != nil {
			labels[result] = failure
		}

		curationDuration.IncrementBy(labels, duration)
		curationDurations.Add(labels, duration)
	}(time.Now())
	// Mark curation inactive once done, regardless of outcome.
	defer status.UpdateCurationState(&metric.CurationState{Active: false})

	iterator, err := samples.NewIterator(true)
	if err != nil {
		return err
	}
	defer iterator.Close()

	if !iterator.SeekToLast() {
		glog.Info("Empty database; skipping curation.")

		return
	}

	// Borrow key buffers from the pools; they are returned on exit.
	keyDto, _ := c.dtoSampleKeys.Get()
	defer c.dtoSampleKeys.Give(keyDto)

	lastBlock, _ := c.sampleKeys.Get()
	defer c.sampleKeys.Give(lastBlock)

	// Capture the last (newest) sample key in the database.
	if err := iterator.Key(keyDto); err != nil {
		panic(err)
	}

	lastBlock.Load(keyDto)

	if !iterator.SeekToFirst() {
		glog.Info("Empty database; skipping curation.")

		return
	}

	firstBlock, _ := c.sampleKeys.Get()
	defer c.sampleKeys.Give(firstBlock)

	// Capture the first (oldest) sample key in the database.
	if err := iterator.Key(keyDto); err != nil {
		panic(err)
	}

	firstBlock.Load(keyDto)

	// The scanner drives the per-fingerprint curation work; it stops curating
	// samples younger than stopAt.
	scanner := &watermarkScanner{
		curationState:     curationState,
		ignoreYoungerThan: ignoreYoungerThan,
		processor:         processor,
		status:            status,
		stop:              c.stop,
		stopAt:            instant.Add(-1 * ignoreYoungerThan),

		sampleIterator: iterator,
		samples:        samples,

		firstBlock: firstBlock,
		lastBlock:  lastBlock,

		ViewQueue: c.viewQueue,

		dtoSampleKeys: c.dtoSampleKeys,
		sampleKeys:    c.sampleKeys,
	}

	// Right now, the ability to stop a curation is limited to the beginning of
	// each fingerprint cycle. It is impractical to cease the work once it has
	// begun for a given series.
	_, err = watermarks.ForEach(scanner, scanner, scanner)

	return
}