Code example #1
File: alerting.go Project: pjjw/prometheus
func (rule *AlertingRule) Eval(timestamp clientmodel.Timestamp, storage metric.PreloadingPersistence) (ast.Vector, error) {
	// Get the raw value of the rule expression.
	exprResult, err := rule.EvalRaw(timestamp, storage)
	if err != nil {
		return nil, err
	}

	rule.mutex.Lock()
	defer rule.mutex.Unlock()

	// Create pending alerts for any new vector elements in the alert expression
	// or update the expression value for existing elements.
	resultFingerprints := utility.Set{}
	for _, sample := range exprResult {
		fp := new(clientmodel.Fingerprint)
		fp.LoadFromMetric(sample.Metric)
		resultFingerprints.Add(*fp)

		if alert, ok := rule.activeAlerts[*fp]; !ok {
			labels := clientmodel.LabelSet{}
			labels.MergeFromMetric(sample.Metric)
			labels = labels.Merge(rule.Labels)
			delete(labels, clientmodel.MetricNameLabel) // delete is a no-op if the label is absent
			rule.activeAlerts[*fp] = &Alert{
				Name:        rule.name,
				Labels:      labels,
				State:       PENDING,
				ActiveSince: timestamp,
				Value:       sample.Value,
			}
		} else {
			alert.Value = sample.Value
		}
	}

	vector := ast.Vector{}

	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
	for fp, activeAlert := range rule.activeAlerts {
		if !resultFingerprints.Has(fp) {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			delete(rule.activeAlerts, fp)
			continue
		}

		if activeAlert.State == PENDING && timestamp.Sub(activeAlert.ActiveSince) >= rule.holdDuration {
			vector = append(vector, activeAlert.sample(timestamp, 0))
			activeAlert.State = FIRING
		}

		vector = append(vector, activeAlert.sample(timestamp, 1))
	}

	return vector, nil
}
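The shape of Eval is a set diff: new fingerprints in the result vector create PENDING alerts, existing ones are refreshed, vanished ones are resolved, and PENDING alerts older than the hold duration start FIRING. A self-contained sketch of that pattern (all types and names below are hypothetical stand-ins, not the Prometheus API):

package main

import (
	"fmt"
	"time"
)

type alertState int

const (
	pending alertState = iota
	firing
)

type alert struct {
	state       alertState
	activeSince time.Time
}

// evalAlerts mirrors the two loops in Eval above: fingerprints present in the
// current result create or refresh alerts; absent fingerprints are resolved;
// PENDING alerts older than the hold duration transition to FIRING.
func evalAlerts(active map[string]*alert, result map[string]bool, now time.Time, hold time.Duration) {
	for fp := range result {
		if _, ok := active[fp]; !ok {
			active[fp] = &alert{state: pending, activeSince: now}
		}
	}
	for fp, a := range active {
		if !result[fp] {
			delete(active, fp) // element vanished from the result vector: resolve
			continue
		}
		if a.state == pending && now.Sub(a.activeSince) >= hold {
			a.state = firing
		}
	}
}

func main() {
	active := map[string]*alert{}
	start := time.Now()
	evalAlerts(active, map[string]bool{"fp1": true}, start, time.Minute)
	evalAlerts(active, map[string]bool{"fp1": true}, start.Add(2*time.Minute), time.Minute)
	fmt.Println(active["fp1"].state == firing) // true
}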
Code example #2
func BenchmarkLoadChunkDescs(b *testing.B) {
	p := persistence{
		basePath: "fixtures",
	}

	var fp clientmodel.Fingerprint
	for i := 0; i < b.N; i++ {
		for _, s := range fpStrings {
			fp.LoadFromString(s)
			cds, err := p.loadChunkDescs(fp, 0)
			if err != nil {
				b.Error(err)
			}
			if len(cds) == 0 {
				b.Error("could not read any chunk descs")
			}
		}
	}
}
Code example #3
func BenchmarkLoadChunksRandomly(b *testing.B) {
	p := persistence{
		basePath: "fixtures",
		bufPool:  sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
	}
	randomIndexes := []int{1, 5, 6, 8, 11, 14, 18, 23, 29, 33, 42, 46}

	var fp clientmodel.Fingerprint
	for i := 0; i < b.N; i++ {
		for _, s := range fpStrings {
			fp.LoadFromString(s)
			cds, err := p.loadChunks(fp, randomIndexes, 0)
			if err != nil {
				b.Error(err)
			}
			if len(cds) == 0 {
				b.Error("could not read any chunks")
			}
		}
	}
}
Code example #4
func BenchmarkLoadChunksSequentially(b *testing.B) {
	p := persistence{
		basePath: "fixtures",
		bufPool:  sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }},
	}
	sequentialIndexes := make([]int, 47)
	for i := range sequentialIndexes {
		sequentialIndexes[i] = i
	}

	var fp clientmodel.Fingerprint
	for i := 0; i < b.N; i++ {
		for _, s := range fpStrings {
			fp.LoadFromString(s)
			cds, err := p.loadChunks(fp, sequentialIndexes, 0)
			if err != nil {
				b.Error(err)
			}
			if len(cds) == 0 {
				b.Error("could not read any chunks")
			}
		}
	}
}
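Examples #2 through #4 benchmark chunk-descriptor loading, then full chunk loading with random versus sequential indexes against the same fixtures directory; they run with the standard tooling, e.g. go test -bench=LoadChunk. The bufPool field they initialize is a plain sync.Pool used to reuse read buffers across loads. A minimal sketch of that reuse pattern (the constant value below is an assumption for illustration):

package main

import (
	"fmt"
	"sync"
)

const chunkLenWithHeader = 1024 // hypothetical value; the real constant is defined by the storage package

func main() {
	// Buffers are taken from the pool before a read and returned afterwards,
	// so steady-state chunk loading allocates (almost) nothing.
	bufPool := sync.Pool{New: func() interface{} { return make([]byte, 0, 3*chunkLenWithHeader) }}

	buf := bufPool.Get().([]byte)
	buf = append(buf, make([]byte, chunkLenWithHeader)...) // stand-in for reading one chunk
	fmt.Println(len(buf), cap(buf))

	bufPool.Put(buf[:0]) // reset the length so the next Get starts with an empty buffer
}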
Code example #5
File: persistence.go Project: bluecmd/prometheus
func (p *persistence) tempFileNameForFingerprint(fp clientmodel.Fingerprint) string {
	fpStr := fp.String()
	return path.Join(p.basePath, fpStr[0:seriesDirNameLen], fpStr[seriesDirNameLen:]+seriesTempFileSuffix)
}
Code example #6
File: persistence.go Project: bluecmd/prometheus
func (p *persistence) dirNameForFingerprint(fp clientmodel.Fingerprint) string {
	fpStr := fp.String()
	return path.Join(p.basePath, fpStr[0:seriesDirNameLen])
}
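Examples #5 and #6 together encode the on-disk layout: the first seriesDirNameLen characters of the fingerprint's fixed-width string form name a fan-out directory, and the remainder names the file. A sketch of the resulting paths, with assumed constant values for illustration:

package main

import (
	"fmt"
	"path"
)

// Assumed values for illustration only; the real ones live in the storage package.
const (
	seriesDirNameLen     = 2
	seriesFileSuffix     = ".db"
	seriesTempFileSuffix = ".db.tmp"
)

func main() {
	basePath := "/data"
	fpStr := "85e5f1b75c61ee11" // a fingerprint rendered as fixed-width hex

	// dirNameForFingerprint: the fan-out directory.
	fmt.Println(path.Join(basePath, fpStr[:seriesDirNameLen]))
	// -> /data/85

	// tempFileNameForFingerprint: the temporary series file inside it.
	fmt.Println(path.Join(basePath, fpStr[:seriesDirNameLen], fpStr[seriesDirNameLen:]+seriesTempFileSuffix))
	// -> /data/85/e5f1b75c61ee11.db.tmp
}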
Code example #7
File: dto.go Project: pjjw/prometheus
func loadFingerprint(f *clientmodel.Fingerprint, d *dto.Fingerprint) {
	f.LoadFromString(d.GetSignature())
}
Code example #8
File: dto.go Project: pjjw/prometheus
func dumpFingerprint(d *dto.Fingerprint, f *clientmodel.Fingerprint) {
	d.Reset()

	d.Signature = proto.String(f.String())
}
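loadFingerprint and dumpFingerprint form a round trip through the protobuf DTO, carrying the fingerprint as its string signature. A sketch of that round trip with a hypothetical stand-in for the generated dto.Fingerprint type:

package main

import "fmt"

// fingerprintDTO is a hypothetical stand-in for the generated dto.Fingerprint.
type fingerprintDTO struct {
	Signature *string
}

func (d *fingerprintDTO) Reset() { d.Signature = nil }

func (d *fingerprintDTO) GetSignature() string {
	if d == nil || d.Signature == nil {
		return ""
	}
	return *d.Signature
}

func main() {
	in := "85e5f1b75c61ee11"

	// dump: reset the DTO, then store the fingerprint's string form.
	d := &fingerprintDTO{}
	d.Reset()
	d.Signature = &in

	// load: read the signature back out (the real code parses it into a Fingerprint).
	out := d.GetSignature()
	fmt.Println(out == in) // true
}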
Code example #9
// sanitizeSeries sanitizes a series based on its series file as defined by the
// provided directory and FileInfo.  The method returns the fingerprint as
// derived from the directory and file name, and whether the provided file has
// been sanitized. A file that failed to be sanitized is moved into the
// "orphaned" sub-directory, if possible.
//
// The following steps are performed:
//
// - A file whose name doesn't comply with the naming scheme of a series file is
//   simply moved into the orphaned directory.
//
// - If the size of the series file isn't a multiple of the chunk size,
//   extraneous bytes are truncated.  If the truncation fails, the file is
//   moved into the orphaned directory.
//
// - A file that is empty (after truncation) is deleted.
//
// - A series that is not archived (i.e. it is in the fingerprintToSeries map)
//   is checked for consistency of its various parameters (like persist
//   watermark, offset of chunkDescs etc.). In particular, overlap between an
//   in-memory head chunk with the most recent persisted chunk is
//   checked. Inconsistencies are rectified.
//
// - A series that is archived (i.e. it is not in the fingerprintToSeries map)
//   is checked for its presence in the index of archived series. If it cannot
//   be found there, it is moved into the orphaned directory.
func (p *persistence) sanitizeSeries(
	dirname string, fi os.FileInfo,
	fingerprintToSeries map[clientmodel.Fingerprint]*memorySeries,
	fpm fpMappings,
) (clientmodel.Fingerprint, bool) {
	filename := path.Join(dirname, fi.Name())
	purge := func() {
		var err error
		defer func() {
			if err != nil {
				log.Errorf("Failed to move lost series file %s to orphaned directory, deleting it instead. Error was: %s", filename, err)
				if err = os.Remove(filename); err != nil {
					log.Errorf("Even deleting file %s did not work: %s", filename, err)
				}
			}
		}()
		orphanedDir := path.Join(p.basePath, "orphaned", path.Base(dirname))
		if err = os.MkdirAll(orphanedDir, 0700); err != nil {
			return
		}
		if err = os.Rename(filename, path.Join(orphanedDir, fi.Name())); err != nil {
			return
		}
	}

	var fp clientmodel.Fingerprint
	if len(fi.Name()) != fpLen-seriesDirNameLen+len(seriesFileSuffix) ||
		!strings.HasSuffix(fi.Name(), seriesFileSuffix) {
		log.Warnf("Unexpected series file name %s.", filename)
		purge()
		return fp, false
	}
	if err := fp.LoadFromString(path.Base(dirname) + fi.Name()[:fpLen-seriesDirNameLen]); err != nil {
		log.Warnf("Error parsing file name %s: %s", filename, err)
		purge()
		return fp, false
	}

	bytesToTrim := fi.Size() % int64(chunkLenWithHeader)
	chunksInFile := int(fi.Size()) / chunkLenWithHeader
	modTime := fi.ModTime()
	if bytesToTrim != 0 {
		log.Warnf(
			"Truncating file %s to exactly %d chunks, trimming %d extraneous bytes.",
			filename, chunksInFile, bytesToTrim,
		)
		f, err := os.OpenFile(filename, os.O_WRONLY, 0640)
		if err != nil {
			log.Errorf("Could not open file %s: %s", filename, err)
			purge()
			return fp, false
		}
		// Close the file handle once this sanitizing pass is done.
		defer f.Close()
		if err := f.Truncate(fi.Size() - bytesToTrim); err != nil {
			log.Errorf("Failed to truncate file %s: %s", filename, err)
			purge()
			return fp, false
		}
	}
	if chunksInFile == 0 {
		log.Warnf("No chunks left in file %s.", filename)
		purge()
		return fp, false
	}

	s, ok := fingerprintToSeries[fp]
	if ok { // This series is supposed to not be archived.
		if s == nil {
			panic("fingerprint mapped to nil pointer")
		}
		maybeAddMapping(fp, s.metric, fpm)
		if !p.pedanticChecks &&
			bytesToTrim == 0 &&
			s.chunkDescsOffset != -1 &&
			chunksInFile == s.chunkDescsOffset+s.persistWatermark &&
			modTime.Equal(s.modTime) {
			// Everything is consistent. We are good.
			return fp, true
		}
		// If we are here, we cannot be sure the series file is
		// consistent with the checkpoint, so we have to take a closer
		// look.
		if s.headChunkClosed {
			// This is the easy case as we have all chunks on
			// disk. Treat this series as a freshly unarchived one
			// by loading the chunkDescs and setting all parameters
			// based on the loaded chunkDescs.
			cds, err := p.loadChunkDescs(fp, 0)
			if err != nil {
				log.Errorf(
					"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
					s.metric, fp, err,
				)
				purge()
				return fp, false
			}
			log.Warnf(
				"Treating recovered metric %v, fingerprint %v, as freshly unarchived, with %d chunks in series file.",
				s.metric, fp, len(cds),
			)
			s.chunkDescs = cds
			s.chunkDescsOffset = 0
			s.savedFirstTime = cds[0].firstTime()
			s.lastTime = cds[len(cds)-1].lastTime()
			s.persistWatermark = len(cds)
			s.modTime = modTime
			return fp, true
		}
		// This is the tricky one: We have chunks from heads.db, but
		// some of those chunks might already be in the series
		// file. Strategy: Take the last time of the most recent chunk
		// in the series file. Then find the oldest chunk among those
		// from heads.db that has a first time later or equal to the
		// last time from the series file. Throw away the older chunks
		// from heads.db and stitch the parts together.

		// First, throw away the chunkDescs without chunks.
		s.chunkDescs = s.chunkDescs[s.persistWatermark:]
		numMemChunkDescs.Sub(float64(s.persistWatermark))
		cds, err := p.loadChunkDescs(fp, 0)
		if err != nil {
			log.Errorf(
				"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
				s.metric, fp, err,
			)
			purge()
			return fp, false
		}
		s.persistWatermark = len(cds)
		s.chunkDescsOffset = 0
		s.savedFirstTime = cds[0].firstTime()
		s.modTime = modTime

		lastTime := cds[len(cds)-1].lastTime()
		keepIdx := -1
		for i, cd := range s.chunkDescs {
			if cd.firstTime() >= lastTime {
				keepIdx = i
				break
			}
		}
		if keepIdx == -1 {
			log.Warnf(
				"Recovered metric %v, fingerprint %v: all %d chunks recovered from series file.",
				s.metric, fp, chunksInFile,
			)
			numMemChunkDescs.Sub(float64(len(s.chunkDescs)))
			atomic.AddInt64(&numMemChunks, int64(-len(s.chunkDescs)))
			s.chunkDescs = cds
			s.headChunkClosed = true
			return fp, true
		}
		log.Warnf(
			"Recovered metric %v, fingerprint %v: recovered %d chunks from series file, recovered %d chunks from checkpoint.",
			s.metric, fp, chunksInFile, len(s.chunkDescs)-keepIdx,
		)
		numMemChunkDescs.Sub(float64(keepIdx))
		atomic.AddInt64(&numMemChunks, int64(-keepIdx))
		s.chunkDescs = append(cds, s.chunkDescs[keepIdx:]...)
		return fp, true
	}
	// This series is supposed to be archived.
	metric, err := p.archivedMetric(fp)
	if err != nil {
		log.Errorf(
			"Fingerprint %v assumed archived but couldn't be looked up in archived index: %s",
			fp, err,
		)
		purge()
		return fp, false
	}
	if metric == nil {
		log.Warnf(
			"Fingerprint %v assumed archived but couldn't be found in archived index.",
			fp,
		)
		purge()
		return fp, false
	}
	// This series looks like a properly archived one.
	maybeAddMapping(fp, metric, fpm)
	return fp, true
}
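The trickiest branch above stitches checkpoint chunks onto the chunks recovered from the series file: every chunk from heads.db that starts before the last time on disk is thrown away, and the rest is appended. A worked sketch of that strategy, with a hypothetical chunk type that carries only its time range:

package main

import "fmt"

// chunk is a hypothetical stand-in; real chunkDescs carry much more state.
type chunk struct{ first, last int64 }

// stitch drops checkpoint chunks already covered by the series file and
// appends the remainder after the on-disk chunks, as sanitizeSeries does.
func stitch(onDisk, fromCheckpoint []chunk) []chunk {
	lastTime := onDisk[len(onDisk)-1].last
	keepIdx := -1
	for i, c := range fromCheckpoint {
		if c.first >= lastTime {
			keepIdx = i
			break
		}
	}
	if keepIdx == -1 {
		return onDisk // every checkpoint chunk was already persisted
	}
	return append(onDisk, fromCheckpoint[keepIdx:]...)
}

func main() {
	onDisk := []chunk{{0, 9}, {10, 19}}
	fromCheckpoint := []chunk{{15, 19}, {19, 25}} // first chunk overlaps the series file
	fmt.Println(stitch(onDisk, fromCheckpoint))   // [{0 9} {10 19} {19 25}]
}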
Code example #10
File: tiered.go Project: pjjw/prometheus
func (t *TieredStorage) loadChunkAroundTime(
	iterator leveldb.Iterator,
	fingerprint *clientmodel.Fingerprint,
	ts clientmodel.Timestamp,
	firstBlock,
	lastBlock *SampleKey,
) (chunk metric.Values, expired bool) {
	if fingerprint.Less(firstBlock.Fingerprint) {
		return nil, false
	}
	if lastBlock.Fingerprint.Less(fingerprint) {
		return nil, true
	}

	seekingKey, _ := t.sampleKeys.Get()
	defer t.sampleKeys.Give(seekingKey)

	seekingKey.Fingerprint = fingerprint

	if fingerprint.Equal(firstBlock.Fingerprint) && ts.Before(firstBlock.FirstTimestamp) {
		seekingKey.FirstTimestamp = firstBlock.FirstTimestamp
	} else if fingerprint.Equal(lastBlock.Fingerprint) && ts.After(lastBlock.FirstTimestamp) {
		seekingKey.FirstTimestamp = lastBlock.FirstTimestamp
	} else {
		seekingKey.FirstTimestamp = ts
	}

	dto, _ := t.dtoSampleKeys.Get()
	defer t.dtoSampleKeys.Give(dto)

	seekingKey.Dump(dto)
	if !iterator.Seek(dto) {
		return chunk, true
	}

	var foundValues metric.Values

	if err := iterator.Key(dto); err != nil {
		panic(err)
	}
	seekingKey.Load(dto)

	if seekingKey.Fingerprint.Equal(fingerprint) {
		// Figure out if we need to rewind by one block.
		// Imagine the following supertime blocks with time ranges:
		//
		// Block 1: ft 1000 - lt 1009 <data>
		// Block 2: ft 1010 - lt 1019 <data>
		//
		// If we are aiming to find time 1005, we would first seek to the block with
		// supertime 1010, then need to rewind by one block by virtue of LevelDB
		// iterator seek behavior.
		//
		// Only do the rewind if there is another chunk before this one.
		if !seekingKey.MayContain(ts) {
			postValues := unmarshalValues(iterator.RawValue(), nil)
			if !seekingKey.Equal(firstBlock) {
				if !iterator.Previous() {
					panic("This should never return false.")
				}

				if err := iterator.Key(dto); err != nil {
					panic(err)
				}
				seekingKey.Load(dto)

				if !seekingKey.Fingerprint.Equal(fingerprint) {
					return postValues, false
				}

				foundValues = unmarshalValues(iterator.RawValue(), nil)
				foundValues = append(foundValues, postValues...)
				return foundValues, false
			}
		}

		foundValues = unmarshalValues(iterator.RawValue(), nil)
		return foundValues, false
	}

	if fingerprint.Less(seekingKey.Fingerprint) {
		if !seekingKey.Equal(firstBlock) {
			if !iterator.Previous() {
				panic("This should never return false.")
			}

			if err := iterator.Key(dto); err != nil {
				panic(err)
			}
			seekingKey.Load(dto)

			if !seekingKey.Fingerprint.Equal(fingerprint) {
				return nil, false
			}

			foundValues = unmarshalValues(iterator.RawValue(), nil)
			return foundValues, false
		}
	}

	panic("illegal state: violated sort invariant")
}
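The rewind logic in the comments deserves a worked example. Using a sorted slice in place of the LevelDB iterator (a simplified, hypothetical model), a seek lands on the first block whose key is at or after the target time, so a time that falls inside the previous block requires stepping back by one:

package main

import (
	"fmt"
	"sort"
)

// blockFor sketches the seek-and-rewind above. Blocks are keyed by their
// first timestamp ("supertime"); firstTimes must be sorted and non-empty.
func blockFor(firstTimes []int64, ts int64) int64 {
	// Seek lands on the first block whose key is >= ts...
	i := sort.Search(len(firstTimes), func(i int) bool { return firstTimes[i] >= ts })
	// ...so a ts inside the previous block (or past the end) needs a rewind,
	// and only if there is a block before the one we landed on.
	if i == len(firstTimes) || (firstTimes[i] > ts && i > 0) {
		i--
	}
	return firstTimes[i]
}

func main() {
	blocks := []int64{1000, 1010}       // block 1 covers 1000-1009, block 2 covers 1010-1019
	fmt.Println(blockFor(blocks, 1005)) // 1000: seek hits 1010, then rewinds one block
	fmt.Println(blockFor(blocks, 1010)) // 1010: exact hit, no rewind
}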
Code example #11
File: persistence.go Project: gitlabuser/prometheus
// sanitizeSeries sanitizes a series based on its series file as defined by the
// provided directory and FileInfo.  The method returns the fingerprint as
// derived from the directory and file name, and whether the provided file has
// been sanitized. A file that failed to be sanitized is deleted, if possible.
//
// The following steps are performed:
//
// - A file whose name doesn't comply with the naming scheme of a series file is
//   simply deleted.
//
// - If the size of the series file isn't a multiple of the chunk size,
//   extraneous bytes are truncated.  If the truncation fails, the file is
//   deleted instead.
//
// - A file that is empty (after truncation) is deleted.
//
// - A series that is not archived (i.e. it is in the fingerprintToSeries map)
//   is checked for consistency of its various parameters (like head-chunk
//   persistence state, offset of chunkDescs etc.). In particular, overlap
//   between an in-memory head chunk with the most recent persisted chunk is
//   checked. Inconsistencies are rectified.
//
// - A series that is archived (i.e. it is not in the fingerprintToSeries map)
//   is checked for its presence in the index of archived series. If it cannot
//   be found there, it is deleted.
func (p *persistence) sanitizeSeries(dirname string, fi os.FileInfo, fingerprintToSeries map[clientmodel.Fingerprint]*memorySeries) (clientmodel.Fingerprint, bool) {
	filename := path.Join(dirname, fi.Name())
	purge := func() {
		glog.Warningf("Deleting lost series file %s.", filename) // TODO: Move to lost+found directory?
		os.Remove(filename)
	}

	var fp clientmodel.Fingerprint
	if len(fi.Name()) != fpLen-seriesDirNameLen+len(seriesFileSuffix) ||
		!strings.HasSuffix(fi.Name(), seriesFileSuffix) {
		glog.Warningf("Unexpected series file name %s.", filename)
		purge()
		return fp, false
	}
	if err := fp.LoadFromString(path.Base(dirname) + fi.Name()[:fpLen-seriesDirNameLen]); err != nil {
		glog.Warningf("Error parsing file name %s: %s", filename, err)
		purge()
		return fp, false
	}

	bytesToTrim := fi.Size() % int64(p.chunkLen+chunkHeaderLen)
	chunksInFile := int(fi.Size()) / (p.chunkLen + chunkHeaderLen)
	if bytesToTrim != 0 {
		glog.Warningf(
			"Truncating file %s to exactly %d chunks, trimming %d extraneous bytes.",
			filename, chunksInFile, bytesToTrim,
		)
		f, err := os.OpenFile(filename, os.O_WRONLY, 0640)
		if err != nil {
			glog.Errorf("Could not open file %s: %s", filename, err)
			purge()
			return fp, false
		}
		// Close the file handle once this sanitizing pass is done.
		defer f.Close()
		if err := f.Truncate(fi.Size() - bytesToTrim); err != nil {
			glog.Errorf("Failed to truncate file %s: %s", filename, err)
			purge()
			return fp, false
		}
	}
	if chunksInFile == 0 {
		glog.Warningf("No chunks left in file %s.", filename)
		purge()
		return fp, false
	}

	s, ok := fingerprintToSeries[fp]
	if ok { // This series is supposed to not be archived.
		if s == nil {
			panic("fingerprint mapped to nil pointer")
		}
		if bytesToTrim == 0 && s.chunkDescsOffset != -1 &&
			((s.headChunkPersisted && chunksInFile == s.chunkDescsOffset+len(s.chunkDescs)) ||
				(!s.headChunkPersisted && chunksInFile == s.chunkDescsOffset+len(s.chunkDescs)-1)) {
			// Everything is consistent. We are good.
			return fp, true
		}
		// If we are here, something's fishy.
		if s.headChunkPersisted {
			// This is the easy case as we don't have a head chunk
			// in heads.db. Treat this series as a freshly
			// unarchived one. No chunks or chunkDescs in memory, no
			// current head chunk.
			glog.Warningf(
				"Treating recovered metric %v, fingerprint %v, as freshly unarchived, with %d chunks in series file.",
				s.metric, fp, chunksInFile,
			)
			s.chunkDescs = nil
			s.chunkDescsOffset = -1
			return fp, true
		}
		// This is the tricky one: We have a head chunk from heads.db,
		// but the very same head chunk might already be in the series
		// file. Strategy: Check the first time of both. If it is the
		// same or newer, assume the latest chunk in the series file
		// is the most recent head chunk. If not, keep the head chunk
		// we got from heads.db.
		// First, assume the head chunk is not yet persisted.
		s.chunkDescs = s.chunkDescs[len(s.chunkDescs)-1:]
		s.chunkDescsOffset = -1
		// Load all the chunk descs (which assumes we have none from the future).
		cds, err := p.loadChunkDescs(fp, clientmodel.Now())
		if err != nil {
			glog.Errorf(
				"Failed to load chunk descriptors for metric %v, fingerprint %v: %s",
				s.metric, fp, err,
			)
			purge()
			return fp, false
		}
		if cds[len(cds)-1].firstTime().Before(s.head().firstTime()) {
			s.chunkDescs = append(cds, s.chunkDescs...)
			glog.Warningf(
				"Recovered metric %v, fingerprint %v: recovered %d chunks from series file, recovered head chunk from checkpoint.",
				s.metric, fp, chunksInFile,
			)
		} else {
			glog.Warningf(
				"Recovered metric %v, fingerprint %v: head chunk found among the %d recovered chunks in series file.",
				s.metric, fp, chunksInFile,
			)
			s.chunkDescs = cds
			s.headChunkPersisted = true
		}
		s.chunkDescsOffset = 0
		return fp, true
	}
	// This series is supposed to be archived.
	metric, err := p.getArchivedMetric(fp)
	if err != nil {
		glog.Errorf(
			"Fingerprint %v assumed archived but couldn't be looked up in archived index: %s",
			fp, err,
		)
		purge()
		return fp, false
	}
	if metric == nil {
		glog.Warningf(
			"Fingerprint %v assumed archived but couldn't be found in archived index.",
			fp,
		)
		purge()
		return fp, false
	}
	// This series looks like a properly archived one.
	return fp, true
}
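Note the contrast with example #9, a later revision of the same function: there a file that fails sanitizing is moved into an "orphaned" sub-directory, falling back to deletion only if the move fails, whereas this version deletes it outright (hence the TODO in purge). The consistency check also differs: this version compares chunksInFile against len(s.chunkDescs) plus the headChunkPersisted flag, while the newer one uses persistWatermark and the file's modification time.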