Example #1
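// processWriteRequest applies a WriteRequest to the store under its lock. The
// grouping labels are hashed with model.LabelsToSignature to form the group
// key; a nil MetricFamilies map deletes the whole group, while a non-nil map
// stores (or overwrites) each metric family together with the request's
// timestamp.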
func (dms *DiskMetricStore) processWriteRequest(wr WriteRequest) {
	dms.lock.Lock()
	defer dms.lock.Unlock()

	key := model.LabelsToSignature(wr.Labels)

	if wr.MetricFamilies == nil {
		// Delete.
		delete(dms.metricGroups, key)
		return
	}
	// Update.
	for name, mf := range wr.MetricFamilies {
		group, ok := dms.metricGroups[key]
		if !ok {
			group = MetricGroup{
				Labels:  wr.Labels,
				Metrics: NameToTimestampedMetricFamilyMap{},
			}
			dms.metricGroups[key] = group
		}
		group.Metrics[name] = TimestampedMetricFamily{
			Timestamp:    wr.Timestamp,
			MetricFamily: mf,
		}
	}
}
Example #2
File: parse.go Project: jameswei/xcodis
// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *Parser) readingValue() stateFn {
	// When we are here, we have read all the labels, so for the
	// infamous special case of a summary, we can finally find out
	// if the metric already exists.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		signature := model.LabelsToSignature(p.currentLabels)
		if summary := p.summaries[signature]; summary != nil {
			p.currentMetric = summary
		} else {
			p.summaries[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else {
		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
		return nil
	}
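	// Assign the parsed value to the field that matches the metric type.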
	switch p.currentMF.GetType() {
	case dto.MetricType_COUNTER:
		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
	case dto.MetricType_GAUGE:
		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
	case dto.MetricType_UNTYPED:
		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
	case dto.MetricType_SUMMARY:
		// *sigh*
		if p.currentMetric.Summary == nil {
			p.currentMetric.Summary = &dto.Summary{}
		}
		switch {
		case p.currentIsSummaryCount:
			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsSummarySum:
			p.currentMetric.Summary.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentQuantile):
			p.currentMetric.Summary.Quantile = append(
				p.currentMetric.Summary.Quantile,
				&dto.Quantile{
					Quantile: proto.Float64(p.currentQuantile),
					Value:    proto.Float64(value),
				},
			)
		}
	default:
		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	return p.startTimestamp
}
Example #3
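// addGroup stores the given metrics as a MetricGroup, keyed by the signature
// of its grouping labels.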
func addGroup(
	mg GroupingKeyToMetricGroup,
	groupingLabels map[string]string,
	metrics NameToTimestampedMetricFamilyMap,
) {
	mg[model.LabelsToSignature(groupingLabels)] = MetricGroup{
		Labels:  groupingLabels,
		Metrics: metrics,
	}
}
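Example #3 shows the shared pattern in its simplest form: hash the grouping labels into a signature and use it as a map key. Below is a minimal, self-contained sketch of just that property; it assumes the github.com/prometheus/common/model import path, which may differ from the package layout used by the projects above.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// The signature depends only on the label names and values, not on map
	// iteration order, so equal label sets always produce the same group key.
	a := model.LabelsToSignature(map[string]string{"job": "job1", "instance": "instance1"})
	b := model.LabelsToSignature(map[string]string{"instance": "instance1", "job": "job1"})
	fmt.Println(a == b) // true
}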
Example #4
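// legacyRestore reads the gob-encoded legacy persistence file, if one is
// configured and exists, and rebuilds dms.metricGroups from it. Each decoded
// MetricFamily is grouped by the "job" and "instance" labels taken from its
// first metric.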
func (dms *DiskMetricStore) legacyRestore() error {
	if dms.persistenceFile == "" {
		return nil
	}
	f, err := os.Open(dms.persistenceFile)
	if os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return err
	}
	defer f.Close()

	var tmf TimestampedMetricFamily
	for d := gob.NewDecoder(f); err == nil; tmf, err = legacyReadTimestampedMetricFamily(d) {
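		// On the first iteration tmf is zero-valued and has no metrics, so
		// the body is skipped and the post statement decodes the first record.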
		if len(tmf.MetricFamily.GetMetric()) == 0 {
			continue // No metric in this MetricFamily.
		}
		name := tmf.MetricFamily.GetName()
		var job, instance string
		for _, lp := range tmf.MetricFamily.GetMetric()[0].GetLabel() {
			// With the way the pushgateway persists things, all
			// metrics in a single MetricFamily proto message share
			// the same job and instance labels, so we only have to
			// peek at the first metric to find them.
			switch lp.GetName() {
			case "job":
				job = lp.GetValue()
			case "instance":
				instance = lp.GetValue()
			}
			if job != "" && instance != "" {
				break
			}
		}
		labels := map[string]string{
			"job":      job,
			"instance": instance,
		}
		key := model.LabelsToSignature(labels)
		group, ok := dms.metricGroups[key]
		if !ok {
			group = MetricGroup{
				Labels:  labels,
				Metrics: NameToTimestampedMetricFamilyMap{},
			}
			dms.metricGroups[key] = group
		}
		group.Metrics[name] = tmf
	}
	if err == io.EOF {
		return nil
	}
	return err
}
Example #5
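// TestAddDeletePersistRestore walks a DiskMetricStore through its life cycle:
// submitting, overwriting, and deleting metric groups, persisting them on
// Shutdown, and restoring them from the persistence file.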
func TestAddDeletePersistRestore(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "diskmetricstore.TestAddDeletePersistRestore.")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tempDir)
	fileName := path.Join(tempDir, "persistence")
	dms := NewDiskMetricStore(fileName, 100*time.Millisecond)

	// Submit a single simple metric family.
	ts1 := time.Now()
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job":      "job1",
			"instance": "instance1",
		},
		Timestamp:      ts1,
		MetricFamilies: map[string]*dto.MetricFamily{"mf3": mf3},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf3); err != nil {
		t.Error(err)
	}

	// Submit two metric families for a different instance.
	ts2 := ts1.Add(time.Second)
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job":      "job1",
			"instance": "instance2",
		},
		Timestamp:      ts2,
		MetricFamilies: map[string]*dto.MetricFamily{"mf1": mf1b, "mf2": mf2},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf1b, mf2, mf3); err != nil {
		t.Error(err)
	}

	// Submit a metric family with the same name for the same job/instance
	// again. It should overwrite the previous metric family for that group.
	ts3 := ts2.Add(time.Second)
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job":      "job1",
			"instance": "instance2",
		},
		Timestamp:      ts3,
		MetricFamilies: map[string]*dto.MetricFamily{"mf1": mf1a},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf1a, mf2, mf3); err != nil {
		t.Error(err)
	}

	// Shut down the dms.
	if err := dms.Shutdown(); err != nil {
		t.Fatal(err)
	}

	// Load it again.
	dms = NewDiskMetricStore(fileName, 100*time.Millisecond)
	if err := checkMetricFamilies(dms, mf1a, mf2, mf3); err != nil {
		t.Error(err)
	}
	// Spot-check timestamp.
	tmf := dms.metricGroups[model.LabelsToSignature(map[string]string{
		"job":      "job1",
		"instance": "instance2",
	})].Metrics["mf1"]
	if expected, got := ts3, tmf.Timestamp; expected != got {
		t.Errorf("Expected timestamp %v, got %v.", expected, got)
	}

	// Delete a group.
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job":      "job1",
			"instance": "instance1",
		},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf1a, mf2); err != nil {
		t.Error(err)
	}

	// Submit another one.
	ts4 := ts3.Add(time.Second)
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job":      "job3",
			"instance": "instance2",
		},
		Timestamp:      ts4,
		MetricFamilies: map[string]*dto.MetricFamily{"mf4": mf4},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf1a, mf2, mf4); err != nil {
		t.Error(err)
	}

	// Deleting by job alone does not remove anything, because no group
	// matches that grouping.
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job": "job1",
		},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf1a, mf2, mf4); err != nil {
		t.Error(err)
	}

	// Delete another group.
	dms.SubmitWriteRequest(WriteRequest{
		Labels: map[string]string{
			"job":      "job3",
			"instance": "instance2",
		},
	})
	time.Sleep(time.Microsecond) // Give loop() time to process.
	if err := checkMetricFamilies(dms, mf1a, mf2); err != nil {
		t.Error(err)
	}
	// Check that no empty map entry for job3 was left behind.
	if _, stillExists := dms.metricGroups[model.LabelsToSignature(map[string]string{
		"job":      "job3",
		"instance": "instance2",
	})]; stillExists {
		t.Error("An instance map for 'job3' still exists.")
	}

	// Shut down the dms again, directly after a number of write requests
	// (to check draining).
	for i := 0; i < 10; i++ {
		dms.SubmitWriteRequest(WriteRequest{
			Labels: map[string]string{
				"job":      "job3",
				"instance": "instance2",
			},
			Timestamp:      ts4,
			MetricFamilies: map[string]*dto.MetricFamily{"mf4": mf4},
		})
	}
	if err := dms.Shutdown(); err != nil {
		t.Fatal(err)
	}
	if err := checkMetricFamilies(dms, mf1a, mf2, mf4); err != nil {
		t.Error(err)
	}
}