Example #1
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := t.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	processor, err := extraction.ProcessorForRequestHeader(resp.Header)
	if err != nil {
		return err
	}

	t.ingestedSamples = make(chan clientmodel.Samples, ingestedSamplesCap)

	processOptions := &extraction.ProcessOptions{
		Timestamp: clientmodel.TimestampFromTime(start),
	}
	go func() {
		err = processor.ProcessSingle(resp.Body, t, processOptions)
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			s.Metric.MergeFromLabelSet(baseLabels, clientmodel.ExporterLabelPrefix)
			// Avoid the copy in Relabel if there are no configs.
			if len(t.metricRelabelConfigs) > 0 {
				labels, err := Relabel(clientmodel.LabelSet(s.Metric), t.metricRelabelConfigs...)
				if err != nil {
					log.Errorf("error while relabeling metric %s of instance %s: ", s.Metric, t.url, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = clientmodel.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}
	return err
}
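For context, scrape only hands samples to the appender one at a time. Below is a minimal sketch of a storage.SampleAppender implementation, assuming (from its use above) that the interface consists of a single Append(*clientmodel.Sample) method; bufferAppender is a hypothetical name used purely for illustration.

// bufferAppender is a hypothetical appender that collects every scraped
// sample in memory. It assumes storage.SampleAppender requires only Append.
type bufferAppender struct {
	samples clientmodel.Samples
}

// Append stores the sample in the in-memory buffer.
func (a *bufferAppender) Append(s *clientmodel.Sample) {
	a.samples = append(a.samples, s)
}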
func (p *persistence) rebuildLabelIndexes(
	fpToSeries map[clientmodel.Fingerprint]*memorySeries,
) error {
	count := 0
	log.Info("Rebuilding label indexes.")
	log.Info("Indexing metrics in memory.")
	for fp, s := range fpToSeries {
		p.indexMetric(fp, s.metric)
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
	}
	log.Info("Indexing archived metrics.")
	var fp codable.Fingerprint
	var m codable.Metric
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		if err := kv.Key(&fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		p.indexMetric(clientmodel.Fingerprint(fp), clientmodel.Metric(m))
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
		return nil
	}); err != nil {
		return err
	}
	log.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
	return nil
}
Example #3
// metric parses a metric.
//
//		<label_set>
//		<metric_identifier> [<label_set>]
//
func (p *parser) metric() clientmodel.Metric {
	name := ""
	m := clientmodel.Metric{}

	t := p.peek().typ
	if t == itemIdentifier || t == itemMetricIdentifier {
		name = p.next().val
		t = p.peek().typ
	}
	if t != itemLeftBrace && name == "" {
		p.errorf("missing metric name or metric selector")
	}
	if t == itemLeftBrace {
		m = clientmodel.Metric(p.labelSet())
	}
	if name != "" {
		m[clientmodel.MetricNameLabel] = clientmodel.LabelValue(name)
	}
	return m
}
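To make the grammar above concrete, here is an illustrative table pairing inputs with the metrics they denote; the examples are hypothetical and not part of the parser.

// metricExamples pairs inputs accepted by the metric grammar with the
// clientmodel.Metric each one denotes. Illustrative only.
var metricExamples = map[string]clientmodel.Metric{
	// <metric_identifier> alone.
	`foo`: {clientmodel.MetricNameLabel: "foo"},
	// <metric_identifier> followed by a <label_set>.
	`foo{bar="baz"}`: {clientmodel.MetricNameLabel: "foo", "bar": "baz"},
	// A bare <label_set>; no metric name is set.
	`{bar="baz"}`: {"bar": "baz"},
}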
func (p *processor001) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error {
	// TODO(matt): Replace with plain-jane JSON unmarshalling.
	buffer, err := ioutil.ReadAll(in)
	if err != nil {
		return err
	}

	entities := entity001{}
	if err = json.Unmarshal(buffer, &entities); err != nil {
		return err
	}

	// TODO(matt): This outer loop is a great basis for parallelization.
	pendingSamples := model.Samples{}
	for _, entity := range entities {
		for _, value := range entity.Metric.Value {
			labels := labelSet(entity.BaseLabels).Merge(labelSet(value.Labels))

			switch entity.Metric.MetricType {
			case gauge001, counter001:
				sampleValue, ok := value.Value.(float64)
				if !ok {
					return fmt.Errorf("could not convert value from %s %s to float64", entity, value)
				}

				pendingSamples = append(pendingSamples, &model.Sample{
					Metric:    model.Metric(labels),
					Timestamp: o.Timestamp,
					Value:     model.SampleValue(sampleValue),
				})

			case histogram001:
				sampleValue, ok := value.Value.(map[string]interface{})
				if !ok {
					return fmt.Errorf("could not convert value from %q to a map[string]interface{}", value.Value)
				}

				for percentile, percentileValue := range sampleValue {
					individualValue, ok := percentileValue.(float64)
					if !ok {
						return fmt.Errorf("could not convert value from %q to a float64", percentileValue)
					}

					childMetric := make(map[model.LabelName]model.LabelValue, len(labels)+1)

					for k, v := range labels {
						childMetric[k] = v
					}

					childMetric[model.LabelName(percentile001)] = model.LabelValue(percentile)

					pendingSamples = append(pendingSamples, &model.Sample{
						Metric:    model.Metric(childMetric),
						Timestamp: o.Timestamp,
						Value:     model.SampleValue(individualValue),
					})
				}

			}
		}
	}
	if len(pendingSamples) > 0 {
		return out.Ingest(pendingSamples)
	}

	return nil
}
func (p *processor002) ProcessSingle(in io.Reader, out Ingester, o *ProcessOptions) error {
	// Processor for telemetry schema version 0.0.2.
	// Container for the incoming telemetry data.
	var entities []struct {
		BaseLabels map[string]string `json:"baseLabels"`
		Docstring  string            `json:"docstring"`
		Metric     struct {
			Type   string          `json:"type"`
			Values json.RawMessage `json:"value"`
		} `json:"metric"`
	}

	if err := json.NewDecoder(in).Decode(&entities); err != nil {
		return err
	}

	pendingSamples := model.Samples{}
	for _, entity := range entities {
		switch entity.Metric.Type {
		case "counter", "gauge":
			var values []counter002

			if err := json.Unmarshal(entity.Metric.Values, &values); err != nil {
				return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err)
			}

			for _, counter := range values {
				labels := labelSet(entity.BaseLabels).Merge(labelSet(counter.Labels))

				pendingSamples = append(pendingSamples, &model.Sample{
					Metric:    model.Metric(labels),
					Timestamp: o.Timestamp,
					Value:     counter.Value,
				})
			}

		case "histogram":
			var values []histogram002

			if err := json.Unmarshal(entity.Metric.Values, &values); err != nil {
				return fmt.Errorf("could not extract %s value: %s", entity.Metric.Type, err)
			}

			for _, histogram := range values {
				for percentile, value := range histogram.Values {
					labels := labelSet(entity.BaseLabels).Merge(labelSet(histogram.Labels))
					labels[model.LabelName("percentile")] = model.LabelValue(percentile)

					pendingSamples = append(pendingSamples, &model.Sample{
						Metric:    model.Metric(labels),
						Timestamp: o.Timestamp,
						Value:     value,
					})
				}
			}

		default:
			return fmt.Errorf("unknown metric type %q", entity.Metric.Type)
		}
	}

	if len(pendingSamples) > 0 {
		return out.Ingest(pendingSamples)
	}

	return nil
}
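For reference, a hedged sketch of a payload this decoder accepts, derived from the struct tags above. The field names inside the "value" array ("labels", "value") are assumptions about the counter002 type, inferred from how it is used here; all values are made up.

// samplePayload002 is an illustrative 0.0.2 payload. The baseLabels,
// docstring, metric, type, and value keys come from the struct tags above;
// the inner "labels"/"value" keys are assumed from counter002's usage.
const samplePayload002 = `[
	{
		"baseLabels": {"__name__": "http_requests_total", "job": "api"},
		"docstring": "Total number of HTTP requests.",
		"metric": {
			"type": "counter",
			"value": [{"labels": {"code": "200"}, "value": 1027}]
		}
	}
]`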
Example #6
// loadSeriesMapAndHeads loads the fingerprint to memory-series mapping and all
// the chunks contained in the checkpoint (and thus not yet persisted to series
// files). The method is capable of loading the checkpoint format v1 and v2. If
// recoverable corruption is detected, or if the dirty flag was set from the
// beginning, crash recovery is run, which might take a while. If an
// unrecoverable error is encountered, it is returned. Call this method during
// start-up while nothing else is running in storage land. This method is
// utterly goroutine-unsafe.
func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, chunksToPersist int64, err error) {
	var chunkDescsTotal int64
	fingerprintToSeries := make(map[clientmodel.Fingerprint]*memorySeries)
	sm = &seriesMap{m: fingerprintToSeries}

	defer func() {
		if sm != nil && p.dirty {
			log.Warn("Persistence layer appears dirty.")
			err = p.recoverFromCrash(fingerprintToSeries)
			if err != nil {
				sm = nil
			}
		}
		if err == nil {
			numMemChunkDescs.Add(float64(chunkDescsTotal))
		}
	}()

	f, err := os.Open(p.headsFileName())
	if os.IsNotExist(err) {
		return sm, 0, nil
	}
	if err != nil {
		log.Warn("Could not open heads file:", err)
		p.dirty = true
		return
	}
	defer f.Close()
	r := bufio.NewReaderSize(f, fileBufSize)

	buf := make([]byte, len(headsMagicString))
	if _, err := io.ReadFull(r, buf); err != nil {
		log.Warn("Could not read from heads file:", err)
		p.dirty = true
		return sm, 0, nil
	}
	magic := string(buf)
	if magic != headsMagicString {
		log.Warnf(
			"unexpected magic string, want %q, got %q",
			headsMagicString, magic,
		)
		p.dirty = true
		return
	}
	version, err := binary.ReadVarint(r)
	if (version != headsFormatVersion && version != headsFormatLegacyVersion) || err != nil {
		log.Warnf("unknown heads format version, want %d", headsFormatVersion)
		p.dirty = true
		return sm, 0, nil
	}
	numSeries, err := codable.DecodeUint64(r)
	if err != nil {
		log.Warn("Could not decode number of series:", err)
		p.dirty = true
		return sm, 0, nil
	}

	for ; numSeries > 0; numSeries-- {
		seriesFlags, err := r.ReadByte()
		if err != nil {
			log.Warn("Could not read series flags:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		headChunkPersisted := seriesFlags&flagHeadChunkPersisted != 0
		fp, err := codable.DecodeUint64(r)
		if err != nil {
			log.Warn("Could not decode fingerprint:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		var metric codable.Metric
		if err := metric.UnmarshalFromReader(r); err != nil {
			log.Warn("Could not decode metric:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		var persistWatermark int64
		var modTime time.Time
		if version != headsFormatLegacyVersion {
			// persistWatermark only present in v2.
			persistWatermark, err = binary.ReadVarint(r)
			if err != nil {
				log.Warn("Could not decode persist watermark:", err)
				p.dirty = true
				return sm, chunksToPersist, nil
			}
			modTimeNano, err := binary.ReadVarint(r)
			if err != nil {
				log.Warn("Could not decode modification time:", err)
				p.dirty = true
				return sm, chunksToPersist, nil
			}
			if modTimeNano != -1 {
				modTime = time.Unix(0, modTimeNano)
			}
		}
		chunkDescsOffset, err := binary.ReadVarint(r)
		if err != nil {
			log.Warn("Could not decode chunk descriptor offset:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		savedFirstTime, err := binary.ReadVarint(r)
		if err != nil {
			log.Warn("Could not decode saved first time:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		numChunkDescs, err := binary.ReadVarint(r)
		if err != nil {
			log.Warn("Could not decode number of chunk descriptors:", err)
			p.dirty = true
			return sm, chunksToPersist, nil
		}
		chunkDescs := make([]*chunkDesc, numChunkDescs)
		if version == headsFormatLegacyVersion {
			if headChunkPersisted {
				persistWatermark = numChunkDescs
			} else {
				persistWatermark = numChunkDescs - 1
			}
		}

		for i := int64(0); i < numChunkDescs; i++ {
			if i < persistWatermark {
				firstTime, err := binary.ReadVarint(r)
				if err != nil {
					log.Warn("Could not decode first time:", err)
					p.dirty = true
					return sm, chunksToPersist, nil
				}
				lastTime, err := binary.ReadVarint(r)
				if err != nil {
					log.Warn("Could not decode last time:", err)
					p.dirty = true
					return sm, chunksToPersist, nil
				}
				chunkDescs[i] = &chunkDesc{
					chunkFirstTime: clientmodel.Timestamp(firstTime),
					chunkLastTime:  clientmodel.Timestamp(lastTime),
				}
				chunkDescsTotal++
			} else {
				// Non-persisted chunk.
				encoding, err := r.ReadByte()
				if err != nil {
					log.Warn("Could not decode chunk type:", err)
					p.dirty = true
					return sm, chunksToPersist, nil
				}
				chunk := newChunkForEncoding(chunkEncoding(encoding))
				if err := chunk.unmarshal(r); err != nil {
					log.Warn("Could not decode chunk:", err)
					p.dirty = true
					return sm, chunksToPersist, nil
				}
				chunkDescs[i] = newChunkDesc(chunk)
				chunksToPersist++
			}
		}

		fingerprintToSeries[clientmodel.Fingerprint(fp)] = &memorySeries{
			metric:           clientmodel.Metric(metric),
			chunkDescs:       chunkDescs,
			persistWatermark: int(persistWatermark),
			modTime:          modTime,
			chunkDescsOffset: int(chunkDescsOffset),
			savedFirstTime:   clientmodel.Timestamp(savedFirstTime),
			headChunkClosed:  persistWatermark >= numChunkDescs,
		}
	}
	return sm, chunksToPersist, nil
}
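The reads at the top of this function imply the heads checkpoint header layout: magic string, varint format version, then the uint64 series count. A minimal sketch that reads just that header in the same order; the helper name is illustrative and not part of the original code.

// readHeadsHeader mirrors the first reads in loadSeriesMapAndHeads above.
// Illustrative helper; not part of the original persistence code.
func readHeadsHeader(r *bufio.Reader) (version int64, numSeries uint64, err error) {
	buf := make([]byte, len(headsMagicString))
	if _, err := io.ReadFull(r, buf); err != nil {
		return 0, 0, err
	}
	if string(buf) != headsMagicString {
		return 0, 0, fmt.Errorf("unexpected magic string %q", buf)
	}
	if version, err = binary.ReadVarint(r); err != nil {
		return 0, 0, err
	}
	numSeries, err = codable.DecodeUint64(r)
	return version, numSeries, err
}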
Example #7
func (t *Target) scrape(sampleAppender storage.SampleAppender) (err error) {
	start := time.Now()
	baseLabels := t.BaseLabels()

	defer func() {
		t.status.setLastError(err)
		recordScrapeHealth(sampleAppender, clientmodel.TimestampFromTime(start), baseLabels, t.status.Health(), time.Since(start))
	}()

	req, err := http.NewRequest("GET", t.URL().String(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := t.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	processor, err := extraction.ProcessorForRequestHeader(resp.Header)
	if err != nil {
		return err
	}

	t.ingestedSamples = make(chan clientmodel.Samples, ingestedSamplesCap)

	processOptions := &extraction.ProcessOptions{
		Timestamp: clientmodel.TimestampFromTime(start),
	}
	go func() {
		err = processor.ProcessSingle(resp.Body, t, processOptions)
		close(t.ingestedSamples)
	}()

	for samples := range t.ingestedSamples {
		for _, s := range samples {
			if t.honorLabels {
				// Merge the metric with the baseLabels for labels not already set in the
				// metric. This also considers labels explicitly set to the empty string.
				for ln, lv := range baseLabels {
					if _, ok := s.Metric[ln]; !ok {
						s.Metric[ln] = lv
					}
				}
			} else {
				// Merge the ingested metric with the base label set. On a collision the
				// value of the label is stored in a label prefixed with the exported prefix.
				for ln, lv := range baseLabels {
					if v, ok := s.Metric[ln]; ok && v != "" {
						s.Metric[clientmodel.ExportedLabelPrefix+ln] = v
					}
					s.Metric[ln] = lv
				}
			}
			// Avoid the copy in Relabel if there are no configs.
			if len(t.metricRelabelConfigs) > 0 {
				labels, err := Relabel(clientmodel.LabelSet(s.Metric), t.metricRelabelConfigs...)
				if err != nil {
					log.Errorf("Error while relabeling metric %s of instance %s: %s", s.Metric, t.url, err)
					continue
				}
				// Check if the timeseries was dropped.
				if labels == nil {
					continue
				}
				s.Metric = clientmodel.Metric(labels)
			}
			sampleAppender.Append(s)
		}
	}
	return err
}
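The two merge branches above differ only in how collisions with base labels are handled. A condensed sketch of the same behavior as a hypothetical helper: given base labels {job="api"} and an ingested metric {job="node"}, honor=true keeps job="node", while honor=false stores "node" under exported_job and sets job="api".

// mergeBaseLabels condenses the honorLabels branches above into one
// hypothetical helper; it is not part of the original code.
func mergeBaseLabels(m clientmodel.Metric, base clientmodel.LabelSet, honor bool) {
	for ln, lv := range base {
		if honor {
			// Base labels only fill in names the metric has not set at all;
			// an explicit empty string still counts as set.
			if _, ok := m[ln]; !ok {
				m[ln] = lv
			}
			continue
		}
		// On collision, preserve the scraped value under the exported prefix,
		// then let the base label win.
		if v, ok := m[ln]; ok && v != "" {
			m[clientmodel.ExportedLabelPrefix+ln] = v
		}
		m[ln] = lv
	}
}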
func (p *persistence) cleanUpArchiveIndexes(
	fpToSeries map[clientmodel.Fingerprint]*memorySeries,
	fpsSeen map[clientmodel.Fingerprint]struct{},
	fpm fpMappings,
) error {
	log.Info("Cleaning up archive indexes.")
	var fp codable.Fingerprint
	var m codable.Metric
	count := 0
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			log.Infof("%d archived metrics checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		_, fpSeen := fpsSeen[clientmodel.Fingerprint(fp)]
		inMemory := false
		if fpSeen {
			_, inMemory = fpToSeries[clientmodel.Fingerprint(fp)]
		}
		if !fpSeen || inMemory {
			if inMemory {
				log.Warnf("Archive clean-up: Fingerprint %v is not archived. Purging from archive indexes.", clientmodel.Fingerprint(fp))
			}
			if !fpSeen {
				log.Warnf("Archive clean-up: Fingerprint %v is unknown. Purging from archive indexes.", clientmodel.Fingerprint(fp))
			}
			// It's fine if the fp is not in the archive indexes.
			if _, err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
				return err
			}
			// Delete from timerange index, too.
			_, err := p.archivedFingerprintToTimeRange.Delete(fp)
			return err
		}
		// fp is legitimately archived. Now we need the metric to check for a mapped fingerprint.
		if err := kv.Value(&m); err != nil {
			return err
		}
		maybeAddMapping(clientmodel.Fingerprint(fp), clientmodel.Metric(m), fpm)
		// Make sure it is in timerange index, too.
		has, err := p.archivedFingerprintToTimeRange.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		log.Warnf("Archive clean-up: Fingerprint %v is not in time-range index. Unarchiving it for recovery.")
		// Again, it's fine if fp is not in the archive index.
		if _, err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
			return err
		}
		cds, err := p.loadChunkDescs(clientmodel.Fingerprint(fp), 0)
		if err != nil {
			return err
		}
		series := newMemorySeries(clientmodel.Metric(m), cds, p.seriesFileModTime(clientmodel.Fingerprint(fp)))
		fpToSeries[clientmodel.Fingerprint(fp)] = series
		return nil
	}); err != nil {
		return err
	}
	count = 0
	if err := p.archivedFingerprintToTimeRange.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			log.Infof("%d archived time ranges checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		has, err := p.archivedFingerprintToMetrics.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		log.Warnf("Archive clean-up: Purging unknown fingerprint %v in time-range index.", fp)
		deleted, err := p.archivedFingerprintToTimeRange.Delete(fp)
		if err != nil {
			return err
		}
		if !deleted {
			log.Errorf("Fingerprint %v to be deleted from archivedFingerprintToTimeRange not found. This should never happen.", fp)
		}
		return nil
	}); err != nil {
		return err
	}
	log.Info("Clean-up of archive indexes complete.")
	return nil
}
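The branching at the top of the first ForEach callback reduces to a small decision table. A hedged condensation as a hypothetical helper, for illustration only:

// archiveCleanupAction summarizes the fpSeen/inMemory branching above.
func archiveCleanupAction(fpSeen, inMemory bool) string {
	switch {
	case !fpSeen:
		return "purge from archive indexes: fingerprint unknown"
	case inMemory:
		return "purge from archive indexes: series is in memory, not archived"
	default:
		return "keep: verify time-range index; unarchive for recovery if missing"
	}
}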
Example #9
// loadSeriesMapAndHeads loads the fingerprint to memory-series mapping and all
// open (non-full) head chunks. If recoverable corruption is detected, or if the
// dirty flag was set from the beginning, crash recovery is run, which might
// take a while. If an unrecoverable error is encountered, it is returned. Call
// this method during start-up while nothing else is running in storage
// land. This method is utterly goroutine-unsafe.
func (p *persistence) loadSeriesMapAndHeads() (sm *seriesMap, err error) {
	var chunksTotal, chunkDescsTotal int64
	fingerprintToSeries := make(map[clientmodel.Fingerprint]*memorySeries)
	sm = &seriesMap{m: fingerprintToSeries}

	defer func() {
		if sm != nil && p.dirty {
			glog.Warning("Persistence layer appears dirty.")
			err = p.recoverFromCrash(fingerprintToSeries)
			if err != nil {
				sm = nil
			}
		}
		if err == nil {
			atomic.AddInt64(&numMemChunks, chunksTotal)
			numMemChunkDescs.Add(float64(chunkDescsTotal))
		}
	}()

	f, err := os.Open(p.headsFileName())
	if os.IsNotExist(err) {
		return sm, nil
	}
	if err != nil {
		glog.Warning("Could not open heads file:", err)
		p.dirty = true
		return
	}
	defer f.Close()
	r := bufio.NewReaderSize(f, fileBufSize)

	buf := make([]byte, len(headsMagicString))
	if _, err := io.ReadFull(r, buf); err != nil {
		glog.Warning("Could not read from heads file:", err)
		p.dirty = true
		return sm, nil
	}
	magic := string(buf)
	if magic != headsMagicString {
		glog.Warningf(
			"unexpected magic string, want %q, got %q",
			headsMagicString, magic,
		)
		p.dirty = true
		return
	}
	if version, err := binary.ReadVarint(r); version != headsFormatVersion || err != nil {
		glog.Warningf("unknown heads format version, want %d", headsFormatVersion)
		p.dirty = true
		return sm, nil
	}
	numSeries, err := codable.DecodeUint64(r)
	if err != nil {
		glog.Warning("Could not decode number of series:", err)
		p.dirty = true
		return sm, nil
	}

	for ; numSeries > 0; numSeries-- {
		seriesFlags, err := r.ReadByte()
		if err != nil {
			glog.Warning("Could not read series flags:", err)
			p.dirty = true
			return sm, nil
		}
		headChunkPersisted := seriesFlags&flagHeadChunkPersisted != 0
		fp, err := codable.DecodeUint64(r)
		if err != nil {
			glog.Warning("Could not decode fingerprint:", err)
			p.dirty = true
			return sm, nil
		}
		var metric codable.Metric
		if err := metric.UnmarshalFromReader(r); err != nil {
			glog.Warning("Could not decode metric:", err)
			p.dirty = true
			return sm, nil
		}
		chunkDescsOffset, err := binary.ReadVarint(r)
		if err != nil {
			glog.Warning("Could not decode chunk descriptor offset:", err)
			p.dirty = true
			return sm, nil
		}
		savedFirstTime, err := binary.ReadVarint(r)
		if err != nil {
			glog.Warning("Could not decode saved first time:", err)
			p.dirty = true
			return sm, nil
		}
		numChunkDescs, err := binary.ReadVarint(r)
		if err != nil {
			glog.Warning("Could not decode number of chunk descriptors:", err)
			p.dirty = true
			return sm, nil
		}
		chunkDescs := make([]*chunkDesc, numChunkDescs)
		chunkDescsTotal += numChunkDescs

		for i := int64(0); i < numChunkDescs; i++ {
			if headChunkPersisted || i < numChunkDescs-1 {
				firstTime, err := binary.ReadVarint(r)
				if err != nil {
					glog.Warning("Could not decode first time:", err)
					p.dirty = true
					return sm, nil
				}
				lastTime, err := binary.ReadVarint(r)
				if err != nil {
					glog.Warning("Could not decode last time:", err)
					p.dirty = true
					return sm, nil
				}
				chunkDescs[i] = &chunkDesc{
					chunkFirstTime: clientmodel.Timestamp(firstTime),
					chunkLastTime:  clientmodel.Timestamp(lastTime),
				}
			} else {
				// Non-persisted head chunk.
				chunksTotal++
				chunkType, err := r.ReadByte()
				if err != nil {
					glog.Warning("Could not decode chunk type:", err)
					p.dirty = true
					return sm, nil
				}
				chunk := chunkForType(chunkType)
				if err := chunk.unmarshal(r); err != nil {
					glog.Warning("Could not decode chunk type:", err)
					p.dirty = true
					return sm, nil
				}
				chunkDescs[i] = newChunkDesc(chunk)
			}
		}

		fingerprintToSeries[clientmodel.Fingerprint(fp)] = &memorySeries{
			metric:             clientmodel.Metric(metric),
			chunkDescs:         chunkDescs,
			chunkDescsOffset:   int(chunkDescsOffset),
			savedFirstTime:     clientmodel.Timestamp(savedFirstTime),
			headChunkPersisted: headChunkPersisted,
		}
	}
	return sm, nil
}
Example #10
func (p *persistence) cleanUpArchiveIndexes(
	fpToSeries map[clientmodel.Fingerprint]*memorySeries,
	fpsSeen map[clientmodel.Fingerprint]struct{},
) error {
	glog.Info("Cleaning up archive indexes.")
	var fp codable.Fingerprint
	var m codable.Metric
	count := 0
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			glog.Infof("%d archived metrics checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		_, fpSeen := fpsSeen[clientmodel.Fingerprint(fp)]
		inMemory := false
		if fpSeen {
			_, inMemory = fpToSeries[clientmodel.Fingerprint(fp)]
		}
		if !fpSeen || inMemory {
			if inMemory {
				glog.Warningf("Archive clean-up: Fingerprint %v is not archived. Purging from archive indexes.", clientmodel.Fingerprint(fp))
			}
			if !fpSeen {
				glog.Warningf("Archive clean-up: Fingerprint %v is unknown. Purging from archive indexes.", clientmodel.Fingerprint(fp))
			}
			if err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
				return err
			}
			// Delete from timerange index, too.
			p.archivedFingerprintToTimeRange.Delete(fp)
			// TODO: Errors are ignored here because fp might not be in the
			// timerange index (which is fine), yet Delete would report that
			// as an error. The Delete signature could be changed to
			// distinguish a real error, like the Get signature does.
			return nil
		}
		// fp is legitimately archived. Make sure it is in timerange index, too.
		has, err := p.archivedFingerprintToTimeRange.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		glog.Warningf("Archive clean-up: Fingerprint %v is not in time-range index. Unarchiving it for recovery.")
		if err := p.archivedFingerprintToMetrics.Delete(fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		series := newMemorySeries(clientmodel.Metric(m), false, math.MinInt64)
		cds, err := p.loadChunkDescs(clientmodel.Fingerprint(fp), clientmodel.Now())
		if err != nil {
			return err
		}
		series.chunkDescs = cds
		series.chunkDescsOffset = 0
		fpToSeries[clientmodel.Fingerprint(fp)] = series
		return nil
	}); err != nil {
		return err
	}
	count = 0
	if err := p.archivedFingerprintToTimeRange.ForEach(func(kv index.KeyValueAccessor) error {
		count++
		if count%10000 == 0 {
			glog.Infof("%d archived time ranges checked.", count)
		}
		if err := kv.Key(&fp); err != nil {
			return err
		}
		has, err := p.archivedFingerprintToMetrics.Has(fp)
		if err != nil {
			return err
		}
		if has {
			return nil // All good.
		}
		glog.Warningf("Archive clean-up: Purging unknown fingerprint %v in time-range index.", fp)
		if err := p.archivedFingerprintToTimeRange.Delete(fp); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}
	glog.Info("Clean-up of archive indexes complete.")
	return nil
}