Example 1
// newSeriesFrontier furnishes a populated seriesFrontier for a given
// fingerprint.  A nil seriesFrontier will be returned if the series cannot
// be found in the store.
func newSeriesFrontier(f model.Fingerprint, d diskFrontier, i leveldb.Iterator) (s *seriesFrontier, err error) {
	var (
		lowerSeek = firstSupertime
		upperSeek = lastSupertime
	)

	// If the diskFrontier for this iterator says that the candidate fingerprint
	// is outside of its seeking domain, there is no way that a seriesFrontier
	// could be materialized.  Simply bail.
	if !d.ContainsFingerprint(f) {
		return
	}

	// If the fingerprint is either the first or the last in the database, we
	// need to use pessimistic boundary frontiers.
	if f.Equal(d.firstFingerprint) {
		lowerSeek = indexable.EncodeTime(d.firstSupertime)
	}
	if f.Equal(d.lastFingerprint) {
		upperSeek = indexable.EncodeTime(d.lastSupertime)
	}

	key := &dto.SampleKey{
		Fingerprint: f.ToDTO(),
		Timestamp:   upperSeek,
	}

	raw, err := coding.NewProtocolBuffer(key).Encode()
	if err != nil {
		panic(err)
	}
	i.Seek(raw)

	if i.Key() == nil {
		return
	}

	retrievedKey, err := extractSampleKey(i)
	if err != nil {
		panic(err)
	}

	retrievedFingerprint := model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)

	// The returned fingerprint may not match if the original seek key lives
	// outside of a metric's frontier.  This is probable, for we are seeking
	// to the maximum allowed time, which could advance us to the next
	// fingerprint.
	if !retrievedFingerprint.Equal(f) {
		i.Previous()

		retrievedKey, err = extractSampleKey(i)
		if err != nil {
			panic(err)
		}
		retrievedFingerprint = model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)
		// If the previous key does not match, we know that the requested
		// fingerprint does not live in the database.
		if !retrievedFingerprint.Equal(f) {
			return
		}
	}

	s = &seriesFrontier{
		lastSupertime: indexable.DecodeTime(retrievedKey.Timestamp),
		lastTime:      time.Unix(*retrievedKey.LastTimestamp, 0),
	}

	key.Timestamp = lowerSeek

	raw, err = coding.NewProtocolBuffer(key).Encode()
	if err != nil {
		panic(err)
	}

	i.Seek(raw)

	retrievedKey, err = extractSampleKey(i)
	if err != nil {
		panic(err)
	}

	retrievedFingerprint = model.NewFingerprintFromRowKey(*retrievedKey.Fingerprint.Signature)

	s.firstSupertime = indexable.DecodeTime(retrievedKey.Timestamp)

	return
}
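
The seek in newSeriesFrontier depends on sample keys sorting by (fingerprint, supertime) and on Seek positioning the iterator at the first key at or after the target, which is why seeking at the series' upper bound can overshoot into the next fingerprint and must be corrected with iterator.Previous(). Below is a minimal, self-contained sketch of that behaviour; the key helper and the sorted-slice stand-in for the iterator are hypothetical, not the real indexable encoding or leveldb.Iterator:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"sort"
)

// key encodes a (fingerprint, supertime) pair so that byte-wise comparison
// matches (fingerprint, supertime) ordering, mirroring the store's key layout.
func key(fp, ts uint64) []byte {
	b := make([]byte, 16)
	binary.BigEndian.PutUint64(b[:8], fp)
	binary.BigEndian.PutUint64(b[8:], ts)
	return b
}

func main() {
	// A sorted stand-in for the LevelDB keyspace: (fingerprint, supertime) keys.
	keys := [][]byte{
		key(1, 1000), key(1, 2000),
		key(2, 1500), key(2, 2500),
		key(3, 1000),
	}

	// seek emulates iterator.Seek: position at the first key >= target.
	seek := func(target []byte) int {
		return sort.Search(len(keys), func(i int) bool {
			return bytes.Compare(keys[i], target) >= 0
		})
	}

	// Seeking fingerprint 2 at the maximum supertime overshoots into
	// fingerprint 3, so the caller must step back one entry, exactly as
	// newSeriesFrontier does with iterator.Previous().
	i := seek(key(2, ^uint64(0)))
	fmt.Printf("landed on fp=%d\n", binary.BigEndian.Uint64(keys[i][:8])) // fp=3
	i-- // the iterator.Previous() correction
	fmt.Printf("after rewind: fp=%d supertime=%d\n",
		binary.BigEndian.Uint64(keys[i][:8]),
		binary.BigEndian.Uint64(keys[i][8:])) // fp=2, supertime=2500
}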
Example 2
func (t *TieredStorage) loadChunkAroundTime(
	iterator leveldb.Iterator,
	fingerprint *clientmodel.Fingerprint,
	ts clientmodel.Timestamp,
	firstBlock,
	lastBlock *SampleKey,
) (chunk metric.Values, expired bool) {
	if fingerprint.Less(firstBlock.Fingerprint) {
		return nil, false
	}
	if lastBlock.Fingerprint.Less(fingerprint) {
		return nil, true
	}

	seekingKey, _ := t.sampleKeys.Get()
	defer t.sampleKeys.Give(seekingKey)

	seekingKey.Fingerprint = fingerprint

	if fingerprint.Equal(firstBlock.Fingerprint) && ts.Before(firstBlock.FirstTimestamp) {
		seekingKey.FirstTimestamp = firstBlock.FirstTimestamp
	} else if fingerprint.Equal(lastBlock.Fingerprint) && ts.After(lastBlock.FirstTimestamp) {
		seekingKey.FirstTimestamp = lastBlock.FirstTimestamp
	} else {
		seekingKey.FirstTimestamp = ts
	}

	dto, _ := t.dtoSampleKeys.Get()
	defer t.dtoSampleKeys.Give(dto)

	seekingKey.Dump(dto)
	if !iterator.Seek(dto) {
		return chunk, true
	}

	var foundValues metric.Values

	if err := iterator.Key(dto); err != nil {
		panic(err)
	}
	seekingKey.Load(dto)

	if seekingKey.Fingerprint.Equal(fingerprint) {
		// Figure out if we need to rewind by one block.
		// Imagine the following supertime blocks with time ranges:
		//
		// Block 1: ft 1000 - lt 1009 <data>
		// Block 2: ft 1010 - lt 1019 <data>
		//
		// If we are aiming to find time 1005, we would first seek to the block with
		// supertime 1010, then need to rewind by one block by virtue of LevelDB
		// iterator seek behavior.
		//
		// Only do the rewind if there is another chunk before this one.
		if !seekingKey.MayContain(ts) {
			postValues := unmarshalValues(iterator.RawValue(), nil)
			if !seekingKey.Equal(firstBlock) {
				if !iterator.Previous() {
					panic("This should never return false.")
				}

				if err := iterator.Key(dto); err != nil {
					panic(err)
				}
				seekingKey.Load(dto)

				if !seekingKey.Fingerprint.Equal(fingerprint) {
					return postValues, false
				}

				foundValues = unmarshalValues(iterator.RawValue(), nil)
				foundValues = append(foundValues, postValues...)
				return foundValues, false
			}
		}

		foundValues = unmarshalValues(iterator.RawValue(), nil)
		return foundValues, false
	}

	if fingerprint.Less(seekingKey.Fingerprint) {
		if !seekingKey.Equal(firstBlock) {
			if !iterator.Previous() {
				panic("This should never return false.")
			}

			if err := iterator.Key(dto); err != nil {
				panic(err)
			}
			seekingKey.Load(dto)

			if !seekingKey.Fingerprint.Equal(fingerprint) {
				return nil, false
			}

			foundValues = unmarshalValues(iterator.RawValue(), nil)
			return foundValues, false
		}
	}

	panic("illegal state: violated sort invariant")
}
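
The rewind described in the comment works because blocks are keyed by the timestamp of their first sample, so a seek for a time that falls inside an earlier block lands on the block after it. Here is a small sketch of that case; block and mayContain are hypothetical stand-ins for SampleKey and SampleKey.MayContain, not the actual types:

package main

import (
	"fmt"
	"sort"
)

// block is a hypothetical stand-in for a SampleKey: a chunk of samples keyed
// by the timestamp of its first sample.
type block struct{ first, last int64 }

func (b block) mayContain(ts int64) bool { return ts >= b.first && ts <= b.last }

func main() {
	// The layout from the comment above: two consecutive supertime blocks.
	blocks := []block{{1000, 1009}, {1010, 1019}}
	ts := int64(1005)

	// A seek keyed on the first timestamp lands on the first block whose key is
	// >= ts, i.e. the second block, even though ts lives inside the first one.
	i := sort.Search(len(blocks), func(i int) bool { return blocks[i].first >= ts })
	fmt.Printf("seek landed on %+v\n", blocks[i]) // {first:1010 last:1019}

	// The rewind step: if the landed-on block cannot contain ts and an earlier
	// block exists, step back one block, as loadChunkAroundTime does above.
	if !blocks[i].mayContain(ts) && i > 0 {
		i--
	}
	fmt.Printf("after rewind: %+v contains %d\n", blocks[i], ts) // {first:1000 last:1009}
}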
Example 3
func (t *tieredStorage) loadChunkAroundTime(iterator leveldb.Iterator, frontier *seriesFrontier, fingerprint model.Fingerprint, ts time.Time) (chunk []model.SamplePair) {
	var (
		targetKey = &dto.SampleKey{
			Fingerprint: fingerprint.ToDTO(),
		}
		foundKey   = &dto.SampleKey{}
		foundValue *dto.SampleValueSeries
	)

	// Limit the target key to be within the series' keyspace.
	if ts.After(frontier.lastSupertime) {
		targetKey.Timestamp = indexable.EncodeTime(frontier.lastSupertime)
	} else {
		targetKey.Timestamp = indexable.EncodeTime(ts)
	}

	// Try seeking to target key.
	rawKey, _ := coding.NewProtocolBuffer(targetKey).Encode()
	iterator.Seek(rawKey)

	foundKey, err := extractSampleKey(iterator)
	if err != nil {
		panic(err)
	}

	// Figure out if we need to rewind by one block.
	// Imagine the following supertime blocks with time ranges:
	//
	// Block 1: ft 1000 - lt 1009 <data>
	// Block 2: ft 1010 - lt 1019 <data>
	//
	// If we are aiming to find time 1005, we would first seek to the block with
	// supertime 1010, then need to rewind by one block by virtue of LevelDB
	// iterator seek behavior.
	//
	// Only do the rewind if there is another chunk before this one.
	rewound := false
	firstTime := indexable.DecodeTime(foundKey.Timestamp)
	if ts.Before(firstTime) && !frontier.firstSupertime.After(ts) {
		iterator.Previous()
		rewound = true
	}

	foundValue, err = extractSampleValues(iterator)
	if err != nil {
		panic(err)
	}

	// If we rewound, but the target time is still past the current block, return
	// the last value of the current (rewound) block and the entire next block.
	if rewound {
		foundKey, err = extractSampleKey(iterator)
		if err != nil {
			panic(err)
		}
		currentChunkLastTime := time.Unix(*foundKey.LastTimestamp, 0)

		if ts.After(currentChunkLastTime) {
			sampleCount := len(foundValue.Value)
			chunk = append(chunk, model.SamplePair{
				Timestamp: time.Unix(*foundValue.Value[sampleCount-1].Timestamp, 0),
				Value:     model.SampleValue(*foundValue.Value[sampleCount-1].Value),
			})
			// We know there's a next block since we have rewound from it.
			iterator.Next()

			foundValue, err = extractSampleValues(iterator)
			if err != nil {
				panic(err)
			}
		}
	}

	// Now append all samples of the block the iterator currently points to.
	for _, sample := range foundValue.Value {
		chunk = append(chunk, model.SamplePair{
			Timestamp: time.Unix(*sample.Timestamp, 0),
			Value:     model.SampleValue(*sample.Value),
		})
	}

	return
}
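
The rewound-but-still-past branch returns only the final sample of the rewound block followed by the entire next block when the target time falls in the gap between two blocks. The sketch below shows which samples come back in that case; samplePair is a hypothetical stand-in for model.SamplePair, using plain int64 timestamps for brevity:

package main

import "fmt"

// samplePair is a hypothetical stand-in for model.SamplePair, using plain
// int64 timestamps and values.
type samplePair struct{ ts, v int64 }

func main() {
	// Two adjacent blocks with a gap between them: [1000..1009] and [1020..1029].
	rewound := []samplePair{{1000, 1}, {1005, 2}, {1009, 3}}
	next := []samplePair{{1020, 4}, {1025, 5}}

	// Target time 1015 falls after the rewound block's last sample but before
	// the next block's first sample.
	ts := int64(1015)

	var chunk []samplePair
	if ts > rewound[len(rewound)-1].ts {
		// Keep only the last sample of the rewound block...
		chunk = append(chunk, rewound[len(rewound)-1])
		// ...and then every sample of the following block, mirroring the
		// rewound-but-still-past branch of loadChunkAroundTime.
		chunk = append(chunk, next...)
	}
	fmt.Println(chunk) // [{1009 3} {1020 4} {1025 5}]
}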