func (s *memorySeriesStorage) preloadChunksForRange(
	fp clientmodel.Fingerprint,
	from clientmodel.Timestamp, through clientmodel.Timestamp,
	stalenessDelta time.Duration,
) ([]*chunkDesc, error) {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		has, first, last, err := s.persistence.hasArchivedMetric(fp)
		if err != nil {
			return nil, err
		}
		if !has {
			s.invalidPreloadRequestsCount.Inc()
			return nil, nil
		}
		if from.Add(-stalenessDelta).Before(last) && through.Add(stalenessDelta).After(first) {
			metric, err := s.persistence.getArchivedMetric(fp)
			if err != nil {
				return nil, err
			}
			series = s.getOrCreateSeries(fp, metric)
		} else {
			return nil, nil
		}
	}
	return series.preloadChunksForRange(from, through, fp, s)
}
func prepareInstantQuery(node Node, timestamp clientmodel.Timestamp, storage local.Storage, queryStats *stats.TimerGroup) (local.Preloader, error) {
	analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
	analyzer := NewQueryAnalyzer(storage)
	Walk(analyzer, node)
	analyzeTimer.Stop()

	// TODO: Preloading should time out after a given duration.
	preloadTimer := queryStats.GetTimer(stats.PreloadTime).Start()
	p := storage.NewPreloader()
	for fp, rangeDuration := range analyzer.FullRanges {
		if err := p.PreloadRange(fp, timestamp.Add(-rangeDuration), timestamp, *stalenessDelta); err != nil {
			p.Close()
			return nil, err
		}
	}
	for fp := range analyzer.IntervalRanges {
		if err := p.PreloadRange(fp, timestamp, timestamp, *stalenessDelta); err != nil {
			p.Close()
			return nil, err
		}
	}
	preloadTimer.Stop()

	ii := &iteratorInitializer{
		storage: storage,
	}
	Walk(ii, node)

	return p, nil
}
func viewAdapterForRangeQuery(node Node, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage metric.PreloadingPersistence, queryStats *stats.TimerGroup) (*viewAdapter, error) {
	analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
	analyzer := NewQueryAnalyzer(storage)
	analyzer.AnalyzeQueries(node)
	analyzeTimer.Stop()

	requestBuildTimer := queryStats.GetTimer(stats.ViewRequestBuildTime).Start()
	viewBuilder := storage.NewViewRequestBuilder()
	for fingerprint, rangeDuration := range analyzer.FullRanges {
		if interval < rangeDuration {
			viewBuilder.GetMetricRange(&fingerprint, start.Add(-rangeDuration), end)
		} else {
			viewBuilder.GetMetricRangeAtInterval(&fingerprint, start.Add(-rangeDuration), end, interval, rangeDuration)
		}
	}
	for fingerprint := range analyzer.IntervalRanges {
		viewBuilder.GetMetricAtInterval(&fingerprint, start, end, interval)
	}
	requestBuildTimer.Stop()

	buildTimer := queryStats.GetTimer(stats.InnerViewBuildingTime).Start()
	view, err := viewBuilder.Execute(time.Duration(60)*time.Second, queryStats)
	buildTimer.Stop()
	if err != nil {
		return nil, err
	}
	return NewViewAdapter(view, storage, queryStats), nil
}
func (t *TieredStorage) seriesTooOld(f *clientmodel.Fingerprint, i clientmodel.Timestamp) (bool, error) {
	// BUG(julius): Make this configurable by query layer.
	i = i.Add(-stalenessLimit)

	wm, cacheHit, _ := t.wmCache.Get(f)
	if !cacheHit {
		if t.memoryArena.HasFingerprint(f) {
			samples := t.memoryArena.CloneSamples(f)
			if len(samples) > 0 {
				newest := samples[len(samples)-1].Timestamp
				t.wmCache.Put(f, &watermarks{High: newest})

				return newest.Before(i), nil
			}
		}

		highTime, diskHit, err := t.DiskStorage.MetricHighWatermarks.Get(f)
		if err != nil {
			return false, err
		}

		if diskHit {
			t.wmCache.Put(f, &watermarks{High: highTime})

			return highTime.Before(i), nil
		}

		t.wmCache.Put(f, &watermarks{})

		return true, nil
	}

	return wm.High.Before(i), nil
}
// eval evaluates the rule expression and then creates pending alerts and fires
// or removes previously pending alerts accordingly.
func (rule *AlertingRule) eval(timestamp clientmodel.Timestamp, engine *promql.Engine) (promql.Vector, error) {
	query, err := engine.NewInstantQuery(rule.vector.String(), timestamp)
	if err != nil {
		return nil, err
	}
	exprResult, err := query.Exec().Vector()
	if err != nil {
		return nil, err
	}

	rule.mutex.Lock()
	defer rule.mutex.Unlock()

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFPs := map[clientmodel.Fingerprint]struct{}{}
	for _, sample := range exprResult {
		fp := sample.Metric.Metric.Fingerprint()
		resultFPs[fp] = struct{}{}

		if alert, ok := rule.activeAlerts[fp]; !ok {
			labels := clientmodel.LabelSet{}
			labels.MergeFromMetric(sample.Metric.Metric)
			labels = labels.Merge(rule.labels)
			if _, ok := labels[clientmodel.MetricNameLabel]; ok {
				delete(labels, clientmodel.MetricNameLabel)
			}
			rule.activeAlerts[fp] = &Alert{
				Name:        rule.name,
				Labels:      labels,
				State:       StatePending,
				ActiveSince: timestamp,
				Value:       sample.Value,
			}
		} else {
			alert.Value = sample.Value
		}
	}

	vector := promql.Vector{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, activeAlert := range rule.activeAlerts {
		if _, ok := resultFPs[fp]; !ok {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			delete(rule.activeAlerts, fp)
			continue
		}

		if activeAlert.State == StatePending && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			activeAlert.State = StateFiring
		}

		vector = append(vector, activeAlert.sample(timestamp, 1))
	}

	return vector, nil
}
// Update implements CurationRemarker.
func (w *LevelDBCurationRemarker) Update(pair *curationKey, t clientmodel.Timestamp) error {
	k := &dto.CurationKey{}
	pair.dump(k)

	return w.LevelDBPersistence.Put(k, &dto.CurationValue{
		LastCompletionTimestamp: proto.Int64(t.Unix()),
	})
}
func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error) {
	// Get the raw value of the rule expression.
	exprResult, err := rule.EvalRaw(timestamp, storage)
	if err != nil {
		return nil, err
	}

	rule.mutex.Lock()
	defer rule.mutex.Unlock()

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFingerprints := utility.Set{}
	for _, sample := range exprResult {
		fp := new(clientmodel.Fingerprint)
		fp.LoadFromMetric(sample.Metric)
		resultFingerprints.Add(*fp)

		if alert, ok := rule.activeAlerts[*fp]; !ok {
			labels := clientmodel.LabelSet{}
			labels.MergeFromMetric(sample.Metric)
			labels = labels.Merge(rule.Labels)
			if _, ok := labels[clientmodel.MetricNameLabel]; ok {
				delete(labels, clientmodel.MetricNameLabel)
			}

			rule.activeAlerts[*fp] = &Alert{
				Name:        rule.name,
				Labels:      labels,
				State:       PENDING,
				ActiveSince: timestamp,
				Value:       sample.Value,
			}
		} else {
			alert.Value = sample.Value
		}
	}

	vector := ast.Vector{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, activeAlert := range rule.activeAlerts {
		if !resultFingerprints.Has(fp) {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			delete(rule.activeAlerts, fp)
			continue
		}

		if activeAlert.State == PENDING && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			activeAlert.State = FIRING
		}

		vector = append(vector, activeAlert.sample(timestamp, 1))
	}

	return vector, nil
}
// MayContain indicates whether the given SampleKey could potentially contain a
// value at the provided time. Even if it returns true, a satisfactory value
// may not actually exist.
func (s *SampleKey) MayContain(t clientmodel.Timestamp) bool {
	switch {
	case t.Before(s.FirstTimestamp):
		return false
	case t.After(s.LastTimestamp):
		return false
	default:
		return true
	}
}
// EvalBoundaries implements the MatrixNode interface and returns the
// boundary values of the selector.
func (node *MatrixSelector) EvalBoundaries(timestamp clientmodel.Timestamp, view *viewAdapter) Matrix {
	interval := &metric.Interval{
		OldestInclusive: timestamp.Add(-node.interval),
		NewestInclusive: timestamp,
	}
	values, err := view.GetBoundaryValues(node.fingerprints, interval)
	if err != nil {
		glog.Error("Unable to get boundary values for vector interval: ", err)
		return Matrix{}
	}
	return values
}
// InsideInterval indicates whether a given range of sorted values could contain
// a value for a given time.
func (v Values) InsideInterval(t clientmodel.Timestamp) bool {
	switch {
	case v.Len() == 0:
		return false
	case t.Before(v[0].Timestamp):
		return false
	case !v[v.Len()-1].Timestamp.Before(t):
		return false
	default:
		return true
	}
}
// interpolateSamples interpolates a value at a target time between two
// provided sample pairs.
func interpolateSamples(first, second *metric.SamplePair, timestamp clientmodel.Timestamp) *metric.SamplePair {
	dv := second.Value - first.Value
	dt := second.Timestamp.Sub(first.Timestamp)

	dDt := dv / clientmodel.SampleValue(dt)
	offset := clientmodel.SampleValue(timestamp.Sub(first.Timestamp))

	return &metric.SamplePair{
		Value:     first.Value + (offset * dDt),
		Timestamp: timestamp,
	}
}
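// The following is a minimal standalone sketch of the interpolation arithmetic
// above, using plain float64 values and millisecond timestamps instead of the
// clientmodel/metric types. The names (samplePair, interpolate) are hypothetical
// and exist only to illustrate the formula; they are not part of the original code.
package main

import "fmt"

// samplePair stands in for metric.SamplePair: a value at a millisecond timestamp.
type samplePair struct {
	timestamp int64 // milliseconds
	value     float64
}

// interpolate mirrors interpolateSamples: linear interpolation between two samples.
func interpolate(first, second samplePair, t int64) samplePair {
	dv := second.value - first.value
	dt := float64(second.timestamp - first.timestamp)
	slope := dv / dt
	offset := float64(t - first.timestamp)
	return samplePair{timestamp: t, value: first.value + offset*slope}
}

func main() {
	a := samplePair{timestamp: 1000, value: 10}
	b := samplePair{timestamp: 2000, value: 30}
	// Halfway between the two samples the interpolated value is 20.
	fmt.Println(interpolate(a, b, 1500)) // {1500 20}
}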
func buildValues(firstValue clientmodel.SampleValue, from, to clientmodel.Timestamp, interval time.Duration) (v metric.Values) {
	for from.Before(to) {
		v = append(v, metric.SamplePair{
			Value:     firstValue,
			Timestamp: from,
		})
		from = from.Add(interval)
		firstValue++
	}
	return
}
func (l *valueAtIntervalAlongRangeList) Get(fp *clientmodel.Fingerprint, from, through clientmodel.Timestamp, interval, rangeDuration time.Duration) *getValueRangeAtIntervalOp {
	var op *getValueRangeAtIntervalOp
	v, ok := l.l.Get()
	if ok {
		op = v.(*getValueRangeAtIntervalOp)
	} else {
		op = &getValueRangeAtIntervalOp{}
	}

	op.fp = *fp
	op.current = from
	op.rangeThrough = from.Add(rangeDuration)
	op.rangeDuration = rangeDuration
	op.interval = interval
	op.through = through

	return op
}
func buildSamples(from, to clientmodel.Timestamp, interval time.Duration, m clientmodel.Metric) (v clientmodel.Samples) {
	i := clientmodel.SampleValue(0)

	for from.Before(to) {
		v = append(v, &clientmodel.Sample{
			Metric:    m,
			Value:     i,
			Timestamp: from,
		})

		from = from.Add(interval)
		i++
	}

	return
}
func generateTestSamples(endTime clientmodel.Timestamp, numTs int, samplesPerTs int, interval time.Duration) clientmodel.Samples {
	samples := make(clientmodel.Samples, 0, numTs*samplesPerTs)

	startTime := endTime.Add(-interval * time.Duration(samplesPerTs-1))
	for ts := 0; ts < numTs; ts++ {
		metric := clientmodel.Metric{}
		metric[clientmodel.MetricNameLabel] = clientmodel.LabelValue(fmt.Sprintf("metric_%d", ts))
		for i := 0; i < samplesPerTs; i++ {
			sample := &clientmodel.Sample{
				Metric:    metric,
				Value:     clientmodel.SampleValue(ts + 1000*i),
				Timestamp: startTime.Add(interval * time.Duration(i)),
			}
			samples = append(samples, sample)
		}
	}
	sort.Sort(samples)
	return samples
}
func prepareRangeQuery(node Node, start clientmodel.Timestamp, end clientmodel.Timestamp, interval time.Duration, storage local.Storage, queryStats *stats.TimerGroup) (local.Preloader, error) {
	analyzeTimer := queryStats.GetTimer(stats.QueryAnalysisTime).Start()
	analyzer := NewQueryAnalyzer(storage)
	Walk(analyzer, node)
	analyzeTimer.Stop()

	// TODO: Preloading should time out after a given duration.
	preloadTimer := queryStats.GetTimer(stats.PreloadTime).Start()
	p := storage.NewPreloader()
	for fp, rangeDuration := range analyzer.FullRanges {
		if err := p.PreloadRange(fp, start.Add(-rangeDuration), end, *stalenessDelta); err != nil {
			p.Close()
			return nil, err
		}
		/*
			if interval < rangeDuration {
				if err := p.GetMetricRange(fp, end, end.Sub(start)+rangeDuration); err != nil {
					p.Close()
					return nil, err
				}
			} else {
				if err := p.GetMetricRangeAtInterval(fp, start, end, interval, rangeDuration); err != nil {
					p.Close()
					return nil, err
				}
			}
		*/
	}
	for fp := range analyzer.IntervalRanges {
		if err := p.PreloadRange(fp, start, end, *stalenessDelta); err != nil {
			p.Close()
			return nil, err
		}
	}
	preloadTimer.Stop()

	ii := &iteratorInitializer{
		storage: storage,
	}
	Walk(ii, node)

	return p, nil
}
// preloadChunksForRange loads chunks for the given range from the persistence.
// The caller must have locked the fingerprint of the series.
func (s *memorySeries) preloadChunksForRange(
	from clientmodel.Timestamp, through clientmodel.Timestamp,
	fp clientmodel.Fingerprint, mss *memorySeriesStorage,
) ([]*chunkDesc, error) {
	firstChunkDescTime := clientmodel.Latest
	if len(s.chunkDescs) > 0 {
		firstChunkDescTime = s.chunkDescs[0].firstTime()
	}
	if s.chunkDescsOffset != 0 && from.Before(firstChunkDescTime) {
		cds, err := mss.loadChunkDescs(fp, firstChunkDescTime)
		if err != nil {
			return nil, err
		}
		s.chunkDescs = append(cds, s.chunkDescs...)
		s.chunkDescsOffset = 0
		s.persistWatermark += len(cds)
	}

	if len(s.chunkDescs) == 0 {
		return nil, nil
	}

	// Find first chunk with start time after "from".
	fromIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].firstTime().After(from)
	})
	// Find first chunk with start time after "through".
	throughIdx := sort.Search(len(s.chunkDescs), func(i int) bool {
		return s.chunkDescs[i].firstTime().After(through)
	})
	if fromIdx > 0 {
		fromIdx--
	}
	if throughIdx == len(s.chunkDescs) {
		throughIdx--
	}

	pinIndexes := make([]int, 0, throughIdx-fromIdx+1)
	for i := fromIdx; i <= throughIdx; i++ {
		pinIndexes = append(pinIndexes, i)
	}
	return s.preloadChunks(pinIndexes, fp, mss)
}
// ValueAtTime implements SeriesIterator.
func (it *memorySeriesIterator) ValueAtTime(t clientmodel.Timestamp) metric.Values {
	// The most common case. We are iterating through a chunk.
	if it.chunkIt != nil && it.chunkIt.contains(t) {
		return it.chunkIt.valueAtTime(t)
	}

	if len(it.chunks) == 0 {
		return nil
	}

	// Before or exactly on the first sample of the series.
	it.chunkIt = it.chunkIterator(0)
	ts := it.chunkIt.timestampAtIndex(0)
	if !t.After(ts) {
		// return first value of first chunk
		return metric.Values{metric.SamplePair{
			Timestamp: ts,
			Value:     it.chunkIt.sampleValueAtIndex(0),
		}}
	}

	// After or exactly on the last sample of the series.
	it.chunkIt = it.chunkIterator(len(it.chunks) - 1)
	ts = it.chunkIt.lastTimestamp()
	if !t.Before(ts) {
		// return last value of last chunk
		return metric.Values{metric.SamplePair{
			Timestamp: ts,
			Value:     it.chunkIt.sampleValueAtIndex(it.chunkIt.length() - 1),
		}}
	}

	// Find last chunk where firstTime() is before or equal to t.
	l := len(it.chunks) - 1
	i := sort.Search(len(it.chunks), func(i int) bool {
		return !it.chunks[l-i].firstTime().After(t)
	})
	if i == len(it.chunks) {
		panic("out of bounds")
	}
	it.chunkIt = it.chunkIterator(l - i)
	ts = it.chunkIt.lastTimestamp()
	if t.After(ts) {
		// We ended up between two chunks.
		sp1 := metric.SamplePair{
			Timestamp: ts,
			Value:     it.chunkIt.sampleValueAtIndex(it.chunkIt.length() - 1),
		}
		it.chunkIt = it.chunkIterator(l - i + 1)
		return metric.Values{
			sp1,
			metric.SamplePair{
				Timestamp: it.chunkIt.timestampAtIndex(0),
				Value:     it.chunkIt.sampleValueAtIndex(0),
			},
		}
	}
	return it.chunkIt.valueAtTime(t)
}
// EvalBoundaries implements the MatrixNode interface and returns the
// boundary values of the selector.
func (node *MatrixSelector) EvalBoundaries(timestamp clientmodel.Timestamp) Matrix {
	interval := &metric.Interval{
		OldestInclusive: timestamp.Add(-node.interval),
		NewestInclusive: timestamp,
	}

	//// timer := v.stats.GetTimer(stats.GetBoundaryValuesTime).Start()
	sampleStreams := []SampleStream{}
	for fp, it := range node.iterators {
		samplePairs := it.GetBoundaryValues(*interval)
		if len(samplePairs) == 0 {
			continue
		}

		sampleStream := SampleStream{
			Metric: node.metrics[fp],
			Values: samplePairs,
		}
		sampleStreams = append(sampleStreams, sampleStream)
	}
	//// timer.Stop()
	return sampleStreams
}
// GetValueAtTime implements SeriesIterator.
func (it *memorySeriesIterator) GetValueAtTime(t clientmodel.Timestamp) metric.Values {
	it.lock()
	defer it.unlock()

	// The most common case. We are iterating through a chunk.
	if it.chunkIt != nil && it.chunkIt.contains(t) {
		return it.chunkIt.getValueAtTime(t)
	}
	it.chunkIt = nil

	if len(it.chunks) == 0 {
		return nil
	}

	// Before or exactly on the first sample of the series.
	if !t.After(it.chunks[0].firstTime()) {
		// return first value of first chunk
		return it.chunks[0].newIterator().getValueAtTime(t)
	}
	// After or exactly on the last sample of the series.
	if !t.Before(it.chunks[len(it.chunks)-1].lastTime()) {
		// return last value of last chunk
		return it.chunks[len(it.chunks)-1].newIterator().getValueAtTime(t)
	}

	// Find first chunk where lastTime() is after or equal to t.
	i := sort.Search(len(it.chunks), func(i int) bool {
		return !it.chunks[i].lastTime().Before(t)
	})
	if i == len(it.chunks) {
		panic("out of bounds")
	}

	if t.Before(it.chunks[i].firstTime()) {
		// We ended up between two chunks.
		return metric.Values{
			it.chunks[i-1].newIterator().getValueAtTime(t)[0],
			it.chunks[i].newIterator().getValueAtTime(t)[0],
		}
	}
	// We ended up in the middle of a chunk. We might stay there for a while,
	// so save it as the current chunk iterator.
	it.chunkIt = it.chunks[i].newIterator()
	return it.chunkIt.getValueAtTime(t)
}
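// The chunk lookup above relies on sort.Search to find the first chunk whose
// last timestamp is not before t. Below is a minimal standalone sketch of that
// pattern with plain int64 timestamps and a hypothetical chunk type; it only
// illustrates the search predicate and is not part of the original code.
package main

import (
	"fmt"
	"sort"
)

// chunk is a hypothetical stand-in holding an inclusive [first, last] time range.
type chunk struct {
	first, last int64
}

// findChunk returns the index of the first chunk whose last timestamp is >= t,
// the same predicate as !it.chunks[i].lastTime().Before(t) above.
func findChunk(chunks []chunk, t int64) int {
	return sort.Search(len(chunks), func(i int) bool {
		return chunks[i].last >= t
	})
}

func main() {
	chunks := []chunk{{0, 9}, {10, 19}, {20, 29}}
	fmt.Println(findChunk(chunks, 15)) // 1: t falls inside the second chunk
	fmt.Println(findChunk(chunks, 19)) // 1: the boundary is inclusive
	fmt.Println(findChunk(chunks, 35)) // 3 == len(chunks): t is after every chunk
}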
// contains implements chunkIterator.
func (it *deltaEncodedChunkIterator) contains(t clientmodel.Timestamp) bool {
	return !t.Before(it.chunk.firstTime()) && !t.After(it.chunk.lastTime())
}
// EncodeTimeInto writes the provided time into the specified buffer subject
// to the LevelDB big endian key sort order requirement.
func EncodeTimeInto(dst []byte, t clientmodel.Timestamp) {
	binary.BigEndian.PutUint64(dst, uint64(t.Unix()))
}
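// A brief standalone illustration of why the key is written big endian:
// byte-wise comparison of big-endian uint64 encodings preserves numeric order
// for non-negative timestamps, which is what LevelDB's default key comparator
// relies on. This sketch uses only the standard library and is not part of the
// original code.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encode writes a non-negative Unix timestamp as a big-endian uint64 key.
func encode(ts int64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(ts))
	return b
}

func main() {
	earlier := encode(1400000000) // some Unix timestamp
	later := encode(1400000001)   // one second later
	// Lexicographic comparison of the keys matches chronological order.
	fmt.Println(bytes.Compare(earlier, later)) // -1
}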
// Run facilitates the curation lifecycle.
//
// ignoreYoungerThan represents the most recent time up to which values will be
// curated.
// curationState is the on-disk store where the curation remarks are made for
// how much progress has been made.
func (c *Curator) Run(ignoreYoungerThan time.Duration, instant clientmodel.Timestamp, processor Processor, curationState CurationRemarker, samples *leveldb.LevelDBPersistence, watermarks HighWatermarker, status CurationStateUpdater) (err error) {
	defer func(t time.Time) {
		duration := float64(time.Since(t) / time.Millisecond)

		labels := map[string]string{
			cutOff:        fmt.Sprint(ignoreYoungerThan),
			processorName: processor.Name(),
			result:        success,
		}
		if err != nil {
			labels[result] = failure
		}

		curationDuration.IncrementBy(labels, duration)
		curationDurations.Add(labels, duration)
	}(time.Now())

	defer status.UpdateCurationState(&metric.CurationState{Active: false})

	iterator, err := samples.NewIterator(true)
	if err != nil {
		return err
	}
	defer iterator.Close()

	if !iterator.SeekToLast() {
		glog.Info("Empty database; skipping curation.")

		return
	}

	keyDto, _ := c.dtoSampleKeys.Get()
	defer c.dtoSampleKeys.Give(keyDto)

	lastBlock, _ := c.sampleKeys.Get()
	defer c.sampleKeys.Give(lastBlock)

	if err := iterator.Key(keyDto); err != nil {
		panic(err)
	}

	lastBlock.Load(keyDto)

	if !iterator.SeekToFirst() {
		glog.Info("Empty database; skipping curation.")

		return
	}

	firstBlock, _ := c.sampleKeys.Get()
	defer c.sampleKeys.Give(firstBlock)

	if err := iterator.Key(keyDto); err != nil {
		panic(err)
	}

	firstBlock.Load(keyDto)

	scanner := &watermarkScanner{
		curationState:     curationState,
		ignoreYoungerThan: ignoreYoungerThan,
		processor:         processor,
		status:            status,
		stop:              c.stop,
		stopAt:            instant.Add(-1 * ignoreYoungerThan),

		sampleIterator: iterator,
		samples:        samples,

		firstBlock: firstBlock,
		lastBlock:  lastBlock,

		ViewQueue: c.viewQueue,

		dtoSampleKeys: c.dtoSampleKeys,
		sampleKeys:    c.sampleKeys,
	}

	// Right now, the ability to stop a curation is limited to the beginning of
	// each fingerprint cycle. It is impractical to cease the work once it has
	// begun for a given series.
	_, err = watermarks.ForEach(scanner, scanner, scanner)

	return
}
func (cd *chunkDesc) contains(t clientmodel.Timestamp) bool {
	return !t.Before(cd.firstTime()) && !t.After(cd.lastTime())
}
// Apply implements the Processor interface.
func (p *CompactionProcessor) Apply(sampleIterator leveldb.Iterator, samplesPersistence raw.Persistence, stopAt clientmodel.Timestamp, fingerprint *clientmodel.Fingerprint) (lastCurated clientmodel.Timestamp, err error) {
	var pendingBatch raw.Batch

	defer func() {
		if pendingBatch != nil {
			pendingBatch.Close()
		}
	}()

	var pendingMutations = 0
	var pendingSamples metric.Values
	var unactedSamples metric.Values
	var lastTouchedTime clientmodel.Timestamp
	var keyDropped bool

	sampleKey, _ := p.sampleKeys.Get()
	defer p.sampleKeys.Give(sampleKey)

	sampleKeyDto, _ := p.dtoSampleKeys.Get()
	defer p.dtoSampleKeys.Give(sampleKeyDto)

	if err = sampleIterator.Key(sampleKeyDto); err != nil {
		return
	}

	sampleKey.Load(sampleKeyDto)

	unactedSamples = unmarshalValues(sampleIterator.RawValue(), nil)

	for lastCurated.Before(stopAt) && lastTouchedTime.Before(stopAt) && sampleKey.Fingerprint.Equal(fingerprint) {
		switch {
		// Furnish a new pending batch operation if none is available.
		case pendingBatch == nil:
			pendingBatch = leveldb.NewBatch()

		// If there are no sample values to extract from the datastore, let's
		// continue extracting more values to use. We know that the time.Before()
		// block would prevent us from going into unsafe territory.
		case len(unactedSamples) == 0:
			if !sampleIterator.Next() {
				return lastCurated, fmt.Errorf("illegal condition: invalid iterator on continuation")
			}

			keyDropped = false

			if err = sampleIterator.Key(sampleKeyDto); err != nil {
				return
			}
			sampleKey.Load(sampleKeyDto)
			if !sampleKey.Fingerprint.Equal(fingerprint) {
				break
			}

			unactedSamples = unmarshalValues(sampleIterator.RawValue(), nil)

		// If the number of pending mutations exceeds the allowed batch amount,
		// commit to disk and delete the batch. A new one will be recreated if
		// necessary.
		case pendingMutations >= p.maximumMutationPoolBatch:
			err = samplesPersistence.Commit(pendingBatch)
			if err != nil {
				return
			}

			pendingMutations = 0

			pendingBatch.Close()
			pendingBatch = nil

		case len(pendingSamples) == 0 && len(unactedSamples) >= p.minimumGroupSize:
			lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			unactedSamples = metric.Values{}

		case len(pendingSamples)+len(unactedSamples) < p.minimumGroupSize:
			if !keyDropped {
				k := &dto.SampleKey{}
				sampleKey.Dump(k)
				pendingBatch.Drop(k)

				keyDropped = true
			}
			pendingSamples = append(pendingSamples, unactedSamples...)
			lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			unactedSamples = metric.Values{}
			pendingMutations++

		// If the number of pending writes equals the target group size.
		case len(pendingSamples) == p.minimumGroupSize:
			k := &dto.SampleKey{}
			newSampleKey := buildSampleKey(fingerprint, pendingSamples)
			newSampleKey.Dump(k)
			b := marshalValues(pendingSamples, nil)
			pendingBatch.PutRaw(k, b)

			pendingMutations++
			lastCurated = newSampleKey.FirstTimestamp
			if len(unactedSamples) > 0 {
				if !keyDropped {
					sampleKey.Dump(k)
					pendingBatch.Drop(k)
					keyDropped = true
				}

				if len(unactedSamples) > p.minimumGroupSize {
					pendingSamples = unactedSamples[:p.minimumGroupSize]
					unactedSamples = unactedSamples[p.minimumGroupSize:]
					lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
				} else {
					pendingSamples = unactedSamples
					lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
					unactedSamples = metric.Values{}
				}
			}

		case len(pendingSamples)+len(unactedSamples) >= p.minimumGroupSize:
			if !keyDropped {
				k := &dto.SampleKey{}
				sampleKey.Dump(k)
				pendingBatch.Drop(k)
				keyDropped = true
			}
			remainder := p.minimumGroupSize - len(pendingSamples)
			pendingSamples = append(pendingSamples, unactedSamples[:remainder]...)
			unactedSamples = unactedSamples[remainder:]
			if len(unactedSamples) == 0 {
				lastTouchedTime = pendingSamples[len(pendingSamples)-1].Timestamp
			} else {
				lastTouchedTime = unactedSamples[len(unactedSamples)-1].Timestamp
			}
			pendingMutations++

		default:
			err = fmt.Errorf("unhandled processing case")
		}
	}

	if len(unactedSamples) > 0 || len(pendingSamples) > 0 {
		pendingSamples = append(pendingSamples, unactedSamples...)
		k := &dto.SampleKey{}
		newSampleKey := buildSampleKey(fingerprint, pendingSamples)
		newSampleKey.Dump(k)
		b := marshalValues(pendingSamples, nil)
		pendingBatch.PutRaw(k, b)
		pendingSamples = metric.Values{}
		pendingMutations++
		lastCurated = newSampleKey.FirstTimestamp
	}

	// This is not deferred due to the off-chance that a pre-existing commit
	// failed.
	if pendingBatch != nil && pendingMutations > 0 {
		err = samplesPersistence.Commit(pendingBatch)
		if err != nil {
			return
		}
	}

	return
}
// contains implements chunkIterator.
func (it *deltaEncodedChunkIterator) contains(t clientmodel.Timestamp) bool {
	return !t.Before(it.baseT) && !t.After(it.timestampAtIndex(it.len-1))
}
// === time() clientmodel.SampleValue ===
func timeImpl(timestamp clientmodel.Timestamp, args []Node) interface{} {
	return clientmodel.SampleValue(timestamp.Unix())
}
func (t *TieredStorage) loadChunkAroundTime(
	iterator leveldb.Iterator,
	fingerprint *clientmodel.Fingerprint,
	ts clientmodel.Timestamp,
	firstBlock, lastBlock *SampleKey,
) (chunk metric.Values, expired bool) {
	if fingerprint.Less(firstBlock.Fingerprint) {
		return nil, false
	}
	if lastBlock.Fingerprint.Less(fingerprint) {
		return nil, true
	}

	seekingKey, _ := t.sampleKeys.Get()
	defer t.sampleKeys.Give(seekingKey)

	seekingKey.Fingerprint = fingerprint

	if fingerprint.Equal(firstBlock.Fingerprint) && ts.Before(firstBlock.FirstTimestamp) {
		seekingKey.FirstTimestamp = firstBlock.FirstTimestamp
	} else if fingerprint.Equal(lastBlock.Fingerprint) && ts.After(lastBlock.FirstTimestamp) {
		seekingKey.FirstTimestamp = lastBlock.FirstTimestamp
	} else {
		seekingKey.FirstTimestamp = ts
	}

	dto, _ := t.dtoSampleKeys.Get()
	defer t.dtoSampleKeys.Give(dto)

	seekingKey.Dump(dto)
	if !iterator.Seek(dto) {
		return chunk, true
	}

	var foundValues metric.Values

	if err := iterator.Key(dto); err != nil {
		panic(err)
	}
	seekingKey.Load(dto)

	if seekingKey.Fingerprint.Equal(fingerprint) {
		// Figure out if we need to rewind by one block.
		// Imagine the following supertime blocks with time ranges:
		//
		// Block 1: ft 1000 - lt 1009 <data>
		// Block 2: ft 1010 - lt 1019 <data>
		//
		// If we are aiming to find time 1005, we would first seek to the block with
		// supertime 1010, then need to rewind by one block by virtue of LevelDB
		// iterator seek behavior.
		//
		// Only do the rewind if there is another chunk before this one.
		if !seekingKey.MayContain(ts) {
			postValues := unmarshalValues(iterator.RawValue(), nil)
			if !seekingKey.Equal(firstBlock) {
				if !iterator.Previous() {
					panic("This should never return false.")
				}

				if err := iterator.Key(dto); err != nil {
					panic(err)
				}
				seekingKey.Load(dto)

				if !seekingKey.Fingerprint.Equal(fingerprint) {
					return postValues, false
				}

				foundValues = unmarshalValues(iterator.RawValue(), nil)
				foundValues = append(foundValues, postValues...)
				return foundValues, false
			}
		}

		foundValues = unmarshalValues(iterator.RawValue(), nil)
		return foundValues, false
	}

	if fingerprint.Less(seekingKey.Fingerprint) {
		if !seekingKey.Equal(firstBlock) {
			if !iterator.Previous() {
				panic("This should never return false.")
			}

			if err := iterator.Key(dto); err != nil {
				panic(err)
			}
			seekingKey.Load(dto)

			if !seekingKey.Fingerprint.Equal(fingerprint) {
				return nil, false
			}

			foundValues = unmarshalValues(iterator.RawValue(), nil)
			return foundValues, false
		}
	}

	panic("illegal state: violated sort invariant")
}