Example #1
func (l *LevelDBMetricPersistence) refreshHighWatermarks(groups map[model.Fingerprint]model.Samples) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: refreshHighWatermarks, result: success}, map[string]string{operation: refreshHighWatermarks, result: failure})
	}()

	batch := leveldb.NewBatch()
	defer batch.Close()

	var (
		mutationCount = 0
	)
	for fingerprint, samples := range groups {
		var (
			key                   = &dto.Fingerprint{}
			value                 = &dto.MetricHighWatermark{}
			raw                   []byte
			newestSampleTimestamp = samples[len(samples)-1].Timestamp
			keyEncoded            = coding.NewProtocolBuffer(key)
		)

		key.Signature = proto.String(fingerprint.ToRowKey())
		raw, err = l.metricHighWatermarks.Get(keyEncoded)
		if err != nil {
			return
		}

		if raw != nil {
			err = proto.Unmarshal(raw, value)
			if err != nil {
				continue
			}

			if newestSampleTimestamp.Before(time.Unix(*value.Timestamp, 0)) {
				continue
			}
		}
		value.Timestamp = proto.Int64(newestSampleTimestamp.Unix())
		batch.Put(keyEncoded, coding.NewProtocolBuffer(value))
		mutationCount++
	}

	err = l.metricHighWatermarks.Commit(batch)
	if err != nil {
		panic(err)
	}

	return
}
Example #2
func (c curationState) Get() (key, value coding.Encoder) {
	key = coding.NewProtocolBuffer(&dto.CurationKey{
		Fingerprint:      model.NewFingerprintFromRowKey(c.fingerprint).ToDTO(),
		MinimumGroupSize: proto.Uint32(uint32(c.groupSize)),
		OlderThan:        proto.Int64(int64(c.recencyThreshold)),
	})

	value = coding.NewProtocolBuffer(&dto.CurationValue{
		LastCompletionTimestamp: proto.Int64(c.lastCurated.Unix()),
	})

	return
}
Example #3
func (l *LevelDBMetricPersistence) GetFingerprintsForLabelName(labelName model.LabelName) (fps model.Fingerprints, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: getFingerprintsForLabelName, result: success}, map[string]string{operation: getFingerprintsForLabelName, result: failure})
	}()

	raw, err := l.labelNameToFingerprints.Get(coding.NewProtocolBuffer(model.LabelNameToDTO(&labelName)))
	if err != nil {
		return
	}

	unmarshaled := &dto.FingerprintCollection{}

	err = proto.Unmarshal(raw, unmarshaled)
	if err != nil {
		return
	}

	for _, m := range unmarshaled.Member {
		fp := model.NewFingerprintFromRowKey(*m.Signature)
		fps = append(fps, fp)
	}

	return
}
Example #4
func (l *LevelDBMetricPersistence) GetMetricForFingerprint(f model.Fingerprint) (m *model.Metric, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: getMetricForFingerprint, result: success}, map[string]string{operation: getMetricForFingerprint, result: failure})
	}()

	raw, err := l.fingerprintToMetrics.Get(coding.NewProtocolBuffer(model.FingerprintToDTO(f)))
	if err != nil {
		return
	}

	unmarshaled := &dto.Metric{}
	err = proto.Unmarshal(raw, unmarshaled)
	if err != nil {
		return
	}

	metric := model.Metric{}

	for _, v := range unmarshaled.LabelPair {
		metric[model.LabelName(*v.Name)] = model.LabelValue(*v.Value)
	}

	// Explicit address passing here shaves immense amounts of time off of the
	// code flow due to less tight-loop dereferencing.
	m = &metric

	return
}
Example #5
// curationConsistent determines whether the given metric's curation state is
// consistent with the provided watermark, i.e., whether it requires no
// further curation.
func (w watermarkOperator) curationConsistent(f model.Fingerprint, watermark model.Watermark) (consistent bool, err error) {
	var (
		rawValue      []byte
		curationValue = &dto.CurationValue{}
		curationKey   = &dto.CurationKey{
			Fingerprint:      f.ToDTO(),
			OlderThan:        proto.Int64(w.olderThan.Unix()),
			MinimumGroupSize: proto.Uint32(w.groupSize),
		}
	)

	rawValue, err = w.curationState.Get(coding.NewProtocolBuffer(curationKey))
	if err != nil {
		return
	}

	err = proto.Unmarshal(rawValue, curationValue)
	if err != nil {
		return
	}

	curationRemark := model.NewCurationRemarkFromDTO(curationValue)
	if !curationRemark.OlderThanLimit(watermark.Time) {
		consistent = true
		return
	}

	return
}
Example #6
func (w watermarkFilter) Filter(key, value interface{}) (result storage.FilterResult) {
	fingerprint := key.(model.Fingerprint)
	watermark := value.(model.Watermark)
	curationKey := &dto.CurationKey{
		Fingerprint:      fingerprint.ToDTO(),
		MinimumGroupSize: proto.Uint32(w.groupSize),
		OlderThan:        proto.Int64(int64(w.recencyThreshold)),
	}
	curationValue := &dto.CurationValue{}

	rawCurationValue, err := w.curationState.Get(coding.NewProtocolBuffer(curationKey))
	if err != nil {
		panic(err)
	}

	err = proto.Unmarshal(rawCurationValue, curationValue)
	if err != nil {
		panic(err)
	}

	switch {
	case model.NewCurationRemarkFromDTO(curationValue).OlderThanLimit(watermark.Time):
		result = storage.ACCEPT
	case len(w.stop) != 0:
		result = storage.STOP
	default:
		result = storage.SKIP
	}

	return
}
Example #7
// hasBeenCurated answers true if the provided Fingerprint has been curated
// in the past.
func (w watermarkOperator) hasBeenCurated(f model.Fingerprint) (curated bool, err error) {
	curationKey := &dto.CurationKey{
		Fingerprint:      f.ToDTO(),
		OlderThan:        proto.Int64(w.olderThan.Unix()),
		MinimumGroupSize: proto.Uint32(w.groupSize),
	}

	curated, err = w.curationState.Has(coding.NewProtocolBuffer(curationKey))

	return
}
Example #8
func (s sampleGroup) Get() (key, value coding.Encoder) {
	key = coding.NewProtocolBuffer(&dto.SampleKey{
		Fingerprint:   model.NewFingerprintFromRowKey(s.fingerprint).ToDTO(),
		Timestamp:     indexable.EncodeTime(s.values[0].time),
		LastTimestamp: proto.Int64(s.values[len(s.values)-1].time.Unix()),
		SampleCount:   proto.Uint32(uint32(len(s.values))),
	})

	series := &dto.SampleValueSeries{}

	for _, value := range s.values {
		series.Value = append(series.Value, &dto.SampleValueSeries_Value{
			Timestamp: proto.Int64(value.time.Unix()),
			Value:     proto.Float32(float32(value.value)),
		})
	}

	value = coding.NewProtocolBuffer(series)

	return
}
Example #9
func (l *LevelDBMetricPersistence) hasIndexMetric(dto *dto.Metric) (value bool, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: hasIndexMetric, result: success}, map[string]string{operation: hasIndexMetric, result: failure})
	}()

	dtoKey := coding.NewProtocolBuffer(dto)
	value, err = l.metricMembershipIndex.Has(dtoKey)

	return
}
Example #10
func (l *LevelDBMetricPersistence) HasLabelName(dto *dto.LabelName) (value bool, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: hasLabelName, result: success}, map[string]string{operation: hasLabelName, result: failure})
	}()

	dtoKey := coding.NewProtocolBuffer(dto)
	value, err = l.labelNameToFingerprints.Has(dtoKey)

	return
}
Example #11
func (l *LevelDBMetricPersistence) GetFingerprintsForLabelSet(labelSet model.LabelSet) (fps model.Fingerprints, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: getFingerprintsForLabelSet, result: success}, map[string]string{operation: getFingerprintsForLabelSet, result: failure})
	}()

	sets := []utility.Set{}

	for _, labelSetDTO := range model.LabelSetToDTOs(&labelSet) {
		f, err := l.labelSetToFingerprints.Get(coding.NewProtocolBuffer(labelSetDTO))
		if err != nil {
			return fps, err
		}

		unmarshaled := &dto.FingerprintCollection{}
		err = proto.Unmarshal(f, unmarshaled)
		if err != nil {
			return fps, err
		}

		set := utility.Set{}

		for _, m := range unmarshaled.Member {
			fp := model.NewFingerprintFromRowKey(*m.Signature)
			set.Add(fp)
		}

		sets = append(sets, set)
	}

	numberOfSets := len(sets)
	if numberOfSets == 0 {
		return
	}

	base := sets[0]
	for i := 1; i < numberOfSets; i++ {
		base = base.Intersection(sets[i])
	}
	for _, e := range base.Elements() {
		fingerprint := e.(model.Fingerprint)
		fps = append(fps, fingerprint)
	}

	return
}
Example #12
// indexFingerprints updates all of the Fingerprint to Metric reverse lookups
// in the index via a single bulk update.
//
// This operation is idempotent.
func (l *LevelDBMetricPersistence) indexFingerprints(metrics map[model.Fingerprint]model.Metric) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: indexFingerprints, result: success}, map[string]string{operation: indexFingerprints, result: failure})
	}()

	batch := leveldb.NewBatch()
	defer batch.Close()

	for fingerprint, metric := range metrics {
		key := coding.NewProtocolBuffer(fingerprint.ToDTO())
		value := coding.NewProtocolBuffer(model.MetricToDTO(metric))
		batch.Put(key, value)
	}

	err = l.fingerprintToMetrics.Commit(batch)
	if err != nil {
		panic(err)
	}

	return
}
Example #13
func (l *LevelDBMetricPersistence) GetValueAtTime(fp model.Fingerprint, t time.Time, s StalenessPolicy) (sample *model.Sample, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: getValueAtTime, result: success}, map[string]string{operation: getValueAtTime, result: failure})
	}()

	// TODO: memoize/cache this or change the return type to metric.SamplePair.
	m, err := l.GetMetricForFingerprint(fp)
	if err != nil {
		return
	}

	// Candidate for Refactoring
	k := &dto.SampleKey{
		Fingerprint: fp.ToDTO(),
		Timestamp:   indexable.EncodeTime(t),
	}

	e, err := coding.NewProtocolBuffer(k).Encode()
	if err != nil {
		return
	}

	iterator := l.metricSamples.NewIterator(true)
	defer iterator.Close()

	if !iterator.Seek(e) {
		/*
		 * Two cases for this:
		 * 1.) Corruption in LevelDB.
		 * 2.) Key seek after AND outside known range.
		 *
		 * Once a LevelDB iterator goes invalid, it cannot be recovered; thusly,
		 * we need to create a new one in order to check if the last value in the
		 * database is sufficient for our purposes.  This is, in all reality, a
		 * corner case but one that could bring down the system.
		 */
		iterator = l.metricSamples.NewIterator(true)
		defer iterator.Close()

		if !iterator.SeekToLast() {
			/*
			 * For whatever reason, the LevelDB cannot be recovered.
			 */
			return
		}
	}

	var (
		firstKey   *dto.SampleKey
		firstValue *dto.SampleValueSeries
	)

	firstKey, err = extractSampleKey(iterator)
	if err != nil {
		return
	}

	peekAhead := false

	if !fingerprintsEqual(firstKey.Fingerprint, k.Fingerprint) {
		/*
		 * This allows us to grab values for metrics if our request time is after
		 * the last recorded time subject to the staleness policy due to the nuances
		 * of LevelDB storage:
		 *
		 * # Assumptions:
		 * - K0 < K1 in terms of sorting.
		 * - T0 < T1 in terms of sorting.
		 *
		 * # Data
		 *
		 * K0-T0
		 * K0-T1
		 * K0-T2
		 * K1-T0
		 * K1-T1
		 *
		 * # Scenario
		 * K0-T3, which does not exist, is requested.  LevelDB will thusly seek to
		 * K1-T0, when K0-T2 exists as a perfectly good candidate to check subject
		 * to the provided staleness policy and such.
		 */
		peekAhead = true
	}

	firstTime := indexable.DecodeTime(firstKey.Timestamp)
	if t.Before(firstTime) || peekAhead {
		if !iterator.Previous() {
			/*
			 * Two cases for this:
			 * 1.) Corruption in LevelDB.
			 * 2.) Key seek before AND outside known range.
			 *
			 * This is an explicit validation to ensure that if no previous values for
			 * the series are found, the query aborts.
			 */
			return
		}

		var (
			alternativeKey   *dto.SampleKey
			alternativeValue *dto.SampleValueSeries
		)

		alternativeKey, err = extractSampleKey(iterator)
		if err != nil {
			return
		}

		if !fingerprintsEqual(alternativeKey.Fingerprint, k.Fingerprint) {
			return
		}

		/*
		 * At this point, we found a previous value in the same series in the
		 * database.  LevelDB originally seeked to the subsequent element given
		 * the key, but we need to consider this adjacency instead.
		 */
		alternativeTime := indexable.DecodeTime(alternativeKey.Timestamp)

		firstKey = alternativeKey
		firstValue = alternativeValue
		firstTime = alternativeTime
	}

	firstDelta := firstTime.Sub(t)
	if firstDelta < 0 {
		firstDelta *= -1
	}
	if firstDelta > s.DeltaAllowance {
		return
	}

	firstValue, err = extractSampleValues(iterator)
	if err != nil {
		return
	}

	sample = model.SampleFromDTO(m, &t, firstValue)

	if firstDelta == time.Duration(0) {
		return
	}

	if !iterator.Next() {
		/*
		 * Two cases for this:
		 * 1.) Corruption in LevelDB.
		 * 2.) Key seek after AND outside known range.
		 *
		 * This means that there are no more values left in the storage; and if this
		 * point is reached, we know that the one that has been found is within the
		 * allowed staleness limits.
		 */
		return
	}

	var secondKey *dto.SampleKey

	secondKey, err = extractSampleKey(iterator)
	if err != nil {
		return
	}

	if !fingerprintsEqual(secondKey.Fingerprint, k.Fingerprint) {
		return
	}

	/*
	 * At this point, the current entry in the database has the same key as the
	 * previous one.  The validation logic below expects that the distance
	 * between the two points does not exceed the limit allowed by the
	 * staleness policy, to reduce interpolation errors.
	 *
	 * For this reason, the sample is reset pending the subsequent validation.
	 */
	sample = nil

	secondTime := indexable.DecodeTime(secondKey.Timestamp)

	totalDelta := secondTime.Sub(firstTime)
	if totalDelta > s.DeltaAllowance {
		return
	}

	var secondValue *dto.SampleValueSeries

	secondValue, err = extractSampleValues(iterator)
	if err != nil {
		return
	}

	fValue := *firstValue.Value[0].Value
	sValue := *secondValue.Value[0].Value

	interpolated := interpolate(firstTime, secondTime, fValue, sValue, t)

	sampleValue := &dto.SampleValueSeries{}
	sampleValue.Value = append(sampleValue.Value, &dto.SampleValueSeries_Value{Value: &interpolated})

	sample = model.SampleFromDTO(m, &t, sampleValue)

	return
}
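The interpolate helper called above is not included in this example. Below is a minimal sketch of what a linear interpolation between the two neighboring samples could look like; the name, signature, and float32 types are illustrative assumptions rather than the actual helper, and the only dependency is the standard time package.

// interpolateLinear is a hypothetical stand-in for the interpolate call above.
// It linearly interpolates a value at time t between two known samples
// (t1, v1) and (t2, v2); callers are expected to ensure t1 is before t2.
func interpolateLinear(t1, t2 time.Time, v1, v2 float32, t time.Time) float32 {
	if !t2.After(t1) {
		// Degenerate interval: fall back to the first value.
		return v1
	}
	fraction := float32(t.Sub(t1)) / float32(t2.Sub(t1))
	return v1 + fraction*(v2-v1)
}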
Example #14
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package leveldb

import (
	"github.com/prometheus/prometheus/coding"
	dto "github.com/prometheus/prometheus/model/generated"
	"github.com/prometheus/prometheus/storage/raw"
	"github.com/prometheus/prometheus/storage/raw/leveldb"
)

var (
	existenceValue = coding.NewProtocolBuffer(&dto.MembershipIndexValue{})
)

type LevelDBMembershipIndex struct {
	persistence *leveldb.LevelDBPersistence
}

func (l *LevelDBMembershipIndex) Close() {
	l.persistence.Close()
}

func (l *LevelDBMembershipIndex) Has(key coding.Encoder) (bool, error) {
	return l.persistence.Has(key)
}

func (l *LevelDBMembershipIndex) Drop(key coding.Encoder) error {
Example #15
func (w watermarkState) Get() (key, value coding.Encoder) {
	key = coding.NewProtocolBuffer(model.NewFingerprintFromRowKey(w.fingerprint).ToDTO())
	value = coding.NewProtocolBuffer(model.NewWatermarkFromTime(w.lastAppended).ToMetricHighWatermarkDTO())
	return
}
Example #16
func (t *tieredStorage) loadChunkAroundTime(iterator leveldb.Iterator, frontier *seriesFrontier, fingerprint model.Fingerprint, ts time.Time) (chunk []model.SamplePair) {
	var (
		targetKey = &dto.SampleKey{
			Fingerprint: fingerprint.ToDTO(),
		}
		foundKey   = &dto.SampleKey{}
		foundValue *dto.SampleValueSeries
	)

	// Limit the target key to be within the series' keyspace.
	if ts.After(frontier.lastSupertime) {
		targetKey.Timestamp = indexable.EncodeTime(frontier.lastSupertime)
	} else {
		targetKey.Timestamp = indexable.EncodeTime(ts)
	}

	// Try seeking to target key.
	rawKey, _ := coding.NewProtocolBuffer(targetKey).Encode()
	iterator.Seek(rawKey)

	foundKey, err := extractSampleKey(iterator)
	if err != nil {
		panic(err)
	}

	// Figure out if we need to rewind by one block.
	// Imagine the following supertime blocks with time ranges:
	//
	// Block 1: ft 1000 - lt 1009 <data>
	// Block 2: ft 1010 - lt 1019 <data>
	//
	// If we are aiming to find time 1005, we would first seek to the block with
	// supertime 1010, then need to rewind by one block by virtue of LevelDB
	// iterator seek behavior.
	//
	// Only do the rewind if there is another chunk before this one.
	rewound := false
	firstTime := indexable.DecodeTime(foundKey.Timestamp)
	if ts.Before(firstTime) && !frontier.firstSupertime.After(ts) {
		iterator.Previous()
		rewound = true
	}

	foundValue, err = extractSampleValues(iterator)
	if err != nil {
		panic(err)
	}

	// If we rewound, but the target time is still past the current block, return
	// the last value of the current (rewound) block and the entire next block.
	if rewound {
		foundKey, err = extractSampleKey(iterator)
		if err != nil {
			panic(err)
		}
		currentChunkLastTime := time.Unix(*foundKey.LastTimestamp, 0)

		if ts.After(currentChunkLastTime) {
			sampleCount := len(foundValue.Value)
			chunk = append(chunk, model.SamplePair{
				Timestamp: time.Unix(*foundValue.Value[sampleCount-1].Timestamp, 0),
				Value:     model.SampleValue(*foundValue.Value[sampleCount-1].Value),
			})
			// We know there's a next block since we have rewound from it.
			iterator.Next()

			foundValue, err = extractSampleValues(iterator)
			if err != nil {
				panic(err)
			}
		}
	}

	// Now append all the samples of the currently seeked block to the output.
	for _, sample := range foundValue.Value {
		chunk = append(chunk, model.SamplePair{
			Timestamp: time.Unix(*sample.Timestamp, 0),
			Value:     model.SampleValue(*sample.Value),
		})
	}

	return
}
Example #17
// newSeriesFrontier furnishes a populated seriesFrontier for a given
// fingerprint.  A nil seriesFrontier will be returned if the series cannot
// be found in the store.
func newSeriesFrontier(f model.Fingerprint, d diskFrontier, i leveldb.Iterator) (s *seriesFrontier, err error) {
	var (
		lowerSeek = firstSupertime
		upperSeek = lastSupertime
	)

	// If the diskFrontier for this iterator says that the candidate fingerprint
	// is outside of its seeking domain, there is no way that a seriesFrontier
	// could be materialized.  Simply bail.
	if !d.ContainsFingerprint(f) {
		return
	}

	// If we are either the first or the last key in the database, we need to use
	// pessimistic boundary frontiers.
	if f.Equal(d.firstFingerprint) {
		lowerSeek = indexable.EncodeTime(d.firstSupertime)
	}
	if f.Equal(d.lastFingerprint) {
		upperSeek = indexable.EncodeTime(d.lastSupertime)
	}

	key := &dto.SampleKey{
		Fingerprint: f.ToDTO(),
		Timestamp:   upperSeek,
	}

	raw, err := coding.NewProtocolBuffer(key).Encode()
	if err != nil {
		panic(err)
	}
	i.Seek(raw)

	if i.Key() == nil {
		return
	}

	retrievedKey, err := extractSampleKey(i)
	if err != nil {
		panic(err)
	}

	retrievedFingerprint := model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)

	// The returned fingerprint may not match if the original seek key lives
	// outside of a metric's frontier.  This is probable, for we are seeking to
	// the maximum allowed time, which could advance us to the next
	// fingerprint.
	if !retrievedFingerprint.Equal(f) {
		i.Previous()

		retrievedKey, err = extractSampleKey(i)
		if err != nil {
			panic(err)
		}
		retrievedFingerprint := model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)
		// If the previous key does not match, we know that the requested
		// fingerprint does not live in the database.
		if !retrievedFingerprint.Equal(f) {
			return
		}
	}

	s = &seriesFrontier{
		lastSupertime: indexable.DecodeTime(retrievedKey.Timestamp),
		lastTime:      time.Unix(*retrievedKey.LastTimestamp, 0),
	}

	key.Timestamp = lowerSeek

	raw, err = coding.NewProtocolBuffer(key).Encode()
	if err != nil {
		panic(err)
	}

	i.Seek(raw)

	retrievedKey, err = extractSampleKey(i)
	if err != nil {
		panic(err)
	}

	retrievedFingerprint = model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)

	s.firstSupertime = indexable.DecodeTime(retrievedKey.Timestamp)

	return
}
Example #18
func (l *LevelDBMetricPersistence) GetRangeValues(fp model.Fingerprint, i model.Interval) (v *model.SampleSet, err error) {
	begin := time.Now()

	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: getRangeValues, result: success}, map[string]string{operation: getRangeValues, result: failure})
	}()

	k := &dto.SampleKey{
		Fingerprint: fp.ToDTO(),
		Timestamp:   indexable.EncodeTime(i.OldestInclusive),
	}

	e, err := coding.NewProtocolBuffer(k).Encode()
	if err != nil {
		return
	}

	iterator := l.metricSamples.NewIterator(true)
	defer iterator.Close()

	predicate := keyIsOlderThan(i.NewestInclusive)

	for valid := iterator.Seek(e); valid; valid = iterator.Next() {
		var retrievedKey *dto.SampleKey

		retrievedKey, err = extractSampleKey(iterator)
		if err != nil {
			return
		}

		if predicate(retrievedKey) {
			break
		}

		if !fingerprintsEqual(retrievedKey.Fingerprint, k.Fingerprint) {
			break
		}

		retrievedValue, err := extractSampleValues(iterator)
		if err != nil {
			return nil, err
		}

		if v == nil {
			// TODO: memoize/cache this or change the return type to metric.SamplePair.
			m, err := l.GetMetricForFingerprint(fp)
			if err != nil {
				return v, err
			}
			v = &model.SampleSet{
				Metric: *m,
			}
		}

		v.Values = append(v.Values, model.SamplePair{
			Value:     model.SampleValue(*retrievedValue.Value[0].Value),
			Timestamp: indexable.DecodeTime(retrievedKey.Timestamp),
		})
	}

	// XXX: We should not explicitly sort here but rather rely on the datastore.
	//      This adds appreciable overhead.
	if v != nil {
		sort.Sort(v.Values)
	}

	return
}
Example #19
// indexLabelPairs accumulates all label pair to fingerprint index entries for
// the dirty metrics, appends the new dirtied metrics, sorts, and bulk updates
// the index to reflect the new state.
//
// This operation is idempotent.
func (l *LevelDBMetricPersistence) indexLabelPairs(metrics map[model.Fingerprint]model.Metric) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: indexLabelPairs, result: success}, map[string]string{operation: indexLabelPairs, result: failure})
	}()

	labelPairFingerprints := map[model.LabelPair]utility.Set{}

	for fingerprint, metric := range metrics {
		for labelName, labelValue := range metric {
			labelPair := model.LabelPair{
				Name:  labelName,
				Value: labelValue,
			}
			fingerprintSet, ok := labelPairFingerprints[labelPair]
			if !ok {
				fingerprintSet = utility.Set{}

				fingerprints, err := l.GetFingerprintsForLabelSet(model.LabelSet{
					labelName: labelValue,
				})
				if err != nil {
					return err
				}

				for _, fingerprint := range fingerprints {
					fingerprintSet.Add(fingerprint)
				}
			}

			fingerprintSet.Add(fingerprint)
			labelPairFingerprints[labelPair] = fingerprintSet
		}
	}

	batch := leveldb.NewBatch()
	defer batch.Close()

	for labelPair, fingerprintSet := range labelPairFingerprints {
		fingerprints := model.Fingerprints{}
		for fingerprint := range fingerprintSet {
			fingerprints = append(fingerprints, fingerprint.(model.Fingerprint))
		}

		sort.Sort(fingerprints)

		key := &dto.LabelPair{
			Name:  proto.String(string(labelPair.Name)),
			Value: proto.String(string(labelPair.Value)),
		}
		value := &dto.FingerprintCollection{}
		for _, fingerprint := range fingerprints {
			value.Member = append(value.Member, fingerprint.ToDTO())
		}

		batch.Put(coding.NewProtocolBuffer(key), coding.NewProtocolBuffer(value))
	}

	err = l.labelSetToFingerprints.Commit(batch)
	if err != nil {
		return
	}

	return
}
Example #20
func (l *LevelDBMetricPersistence) AppendSamples(samples model.Samples) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: appendSamples, result: success}, map[string]string{operation: appendSamples, result: failure})
	}()

	var (
		fingerprintToSamples = groupByFingerprint(samples)
		indexErrChan         = make(chan error)
		watermarkErrChan     = make(chan error)
	)

	go func(groups map[model.Fingerprint]model.Samples) {
		var (
			metrics = map[model.Fingerprint]model.Metric{}
		)

		for fingerprint, samples := range groups {
			metrics[fingerprint] = samples[0].Metric
		}

		indexErrChan <- l.indexMetrics(metrics)
	}(fingerprintToSamples)

	go func(groups map[model.Fingerprint]model.Samples) {
		watermarkErrChan <- l.refreshHighWatermarks(groups)
	}(fingerprintToSamples)

	samplesBatch := leveldb.NewBatch()
	defer samplesBatch.Close()

	for fingerprint, group := range fingerprintToSamples {
		for {
			lengthOfGroup := len(group)

			if lengthOfGroup == 0 {
				break
			}

			take := *leveldbChunkSize
			if lengthOfGroup < take {
				take = lengthOfGroup
			}

			chunk := group[0:take]
			group = group[take:lengthOfGroup]

			key := &dto.SampleKey{
				Fingerprint:   fingerprint.ToDTO(),
				Timestamp:     indexable.EncodeTime(chunk[0].Timestamp),
				LastTimestamp: proto.Int64(chunk[take-1].Timestamp.Unix()),
				SampleCount:   proto.Uint32(uint32(take)),
			}

			value := &dto.SampleValueSeries{}
			for _, sample := range chunk {
				value.Value = append(value.Value, &dto.SampleValueSeries_Value{
					Timestamp: proto.Int64(sample.Timestamp.Unix()),
					Value:     proto.Float32(float32(sample.Value)),
				})
			}

			samplesBatch.Put(coding.NewProtocolBuffer(key), coding.NewProtocolBuffer(value))
		}
	}

	err = l.metricSamples.Commit(samplesBatch)
	if err != nil {
		panic(err)
	}

	err = <-indexErrChan
	if err != nil {
		panic(err)
	}

	err = <-watermarkErrChan
	if err != nil {
		panic(err)
	}

	return
}
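The inner loop above writes each fingerprint's samples in runs of at most *leveldbChunkSize entries per SampleKey. The following is a minimal standalone sketch of that take/reslice pattern, using a plain int slice and a hypothetical chunkSize parameter purely for illustration; it is not part of the persistence code itself.

// chunk splits values into consecutive runs of at most chunkSize elements,
// mirroring the slicing pattern used in AppendSamples above.
func chunk(values []int, chunkSize int) (chunks [][]int) {
	for len(values) > 0 {
		take := chunkSize
		if len(values) < take {
			take = len(values)
		}
		chunks = append(chunks, values[:take])
		values = values[take:]
	}
	return chunks
}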
Example #21
// indexMetrics takes groups of samples, determines which ones contain metrics
// that are unknown to the storage stack, and then proceeds to update all
// affected indices.
func (l *LevelDBMetricPersistence) indexMetrics(fingerprints map[model.Fingerprint]model.Metric) (err error) {
	begin := time.Now()
	defer func() {
		duration := time.Since(begin)

		recordOutcome(duration, err, map[string]string{operation: indexMetrics, result: success}, map[string]string{operation: indexMetrics, result: failure})
	}()

	var (
		absentMetrics map[model.Fingerprint]model.Metric
	)

	absentMetrics, err = l.findUnindexedMetrics(fingerprints)
	if err != nil {
		panic(err)
	}

	if len(absentMetrics) == 0 {
		return
	}

	// TODO: For the missing fingerprints, determine what label names and pairs
	// are absent and act accordingly and append fingerprints.
	var (
		doneBuildingLabelNameIndex   = make(chan error)
		doneBuildingLabelPairIndex   = make(chan error)
		doneBuildingFingerprintIndex = make(chan error)
	)

	go func() {
		doneBuildingLabelNameIndex <- l.indexLabelNames(absentMetrics)
	}()

	go func() {
		doneBuildingLabelPairIndex <- l.indexLabelPairs(absentMetrics)
	}()

	go func() {
		doneBuildingFingerprintIndex <- l.indexFingerprints(absentMetrics)
	}()

	makeTopLevelIndex := true

	err = <-doneBuildingLabelNameIndex
	if err != nil {
		makeTopLevelIndex = false
	}
	err = <-doneBuildingLabelPairIndex
	if err != nil {
		makeTopLevelIndex = false
	}
	err = <-doneBuildingFingerprintIndex
	if err != nil {
		makeTopLevelIndex = false
	}

	// If any of the preceding operations failed, we will have inconsistent
	// indices.  Thusly, the Metric membership index should NOT be updated, as
	// its state is used to determine whether to bulk update the other indices.
	// Given that those operations are idempotent, it is OK to repeat them;
	// however, it will consume considerable amounts of time.
	if makeTopLevelIndex {
		batch := leveldb.NewBatch()
		defer batch.Close()

		// WART: We should probably encode simple fingerprints.
		for _, metric := range absentMetrics {
			key := coding.NewProtocolBuffer(model.MetricToDTO(metric))
			batch.Put(key, key)
		}

		err := l.metricMembershipIndex.Commit(batch)
		if err != nil {
			// Not critical.
			log.Println(err)
		}
	}

	return
}