Example #1
0
func (p *prometheus) delete(olderThan time.Duration, batchSize int) error {
	select {
	case s, ok := <-p.curationSema:
		if !ok {
			glog.Warning("Prometheus is shutting down; no more curation runs are allowed.")
			return nil
		}

		defer func() {
			p.curationSema <- s
		}()

	default:
		glog.Warningf("Deferred deletion for %s due to existing operation.", olderThan)

		return nil
	}

	processor := tiered.NewDeletionProcessor(&tiered.DeletionProcessorOptions{
		MaximumMutationPoolBatch: batchSize,
	})
	defer processor.Close()

	curator := tiered.NewCurator(&tiered.CuratorOptions{
		Stop: p.stopBackgroundOperations,

		ViewQueue: p.storage.ViewQueue,
	})
	defer curator.Close()

	return curator.Run(olderThan, clientmodel.Now(), processor, p.storage.DiskStorage.CurationRemarks, p.storage.DiskStorage.MetricSamples, p.storage.DiskStorage.MetricHighWatermarks, p.curationState)
}
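
The select with a default branch at the top of Example #1 is a non-blocking acquire on a channel used as a semaphore: if no token is available, or the channel has been closed for shutdown, the curation run is skipped instead of queued. A minimal, self-contained sketch of that pattern (all names here are hypothetical, not the Prometheus API):

package main

import "fmt"

type guard struct {
	sema chan struct{}
}

func newGuard() *guard {
	g := &guard{sema: make(chan struct{}, 1)}
	g.sema <- struct{}{} // one token available
	return g
}

// tryRun executes fn only if the token can be acquired immediately and
// returns the token when fn finishes.
func (g *guard) tryRun(fn func()) bool {
	select {
	case token, ok := <-g.sema:
		if !ok {
			return false // semaphore closed: shutting down
		}
		defer func() { g.sema <- token }()
		fn()
		return true
	default:
		return false // another run already holds the token
	}
}

func main() {
	g := newGuard()
	ok := g.tryRun(func() { fmt.Println("curation run") })
	fmt.Println("ran:", ok)
}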
Example #2
0
func (s *memorySeriesStorage) loop() {
	checkpointTimer := time.NewTimer(s.checkpointInterval)

	dirtySeriesCount := 0

	defer func() {
		checkpointTimer.Stop()
		log.Info("Maintenance loop stopped.")
		close(s.loopStopped)
	}()

	memoryFingerprints := s.cycleThroughMemoryFingerprints()
	archivedFingerprints := s.cycleThroughArchivedFingerprints()

loop:
	for {
		select {
		case <-s.loopStopping:
			break loop
		case <-checkpointTimer.C:
			s.persistence.checkpointSeriesMapAndHeads(s.fpToSeries, s.fpLocker)
			dirtySeriesCount = 0
			checkpointTimer.Reset(s.checkpointInterval)
		case fp := <-memoryFingerprints:
			if s.maintainMemorySeries(fp, clientmodel.Now().Add(-s.dropAfter)) {
				dirtySeriesCount++
				// Check if we have enough "dirty" series to warrant an early
				// checkpoint. However, if we are already behind persisting
				// chunks, creating a checkpoint would be counterproductive:
				// it would slow down chunk persisting even more, and in such
				// a situation, where disk maintenance is clearly too slow,
				// the best we can do for crash recovery is to persist chunks
				// as quickly as possible. So only checkpoint if the storage
				// is not in "graceful degradation mode".
				if dirtySeriesCount >= s.checkpointDirtySeriesLimit && !s.isDegraded() {
					checkpointTimer.Reset(0)
				}
			}
		case fp := <-archivedFingerprints:
			s.maintainArchivedSeries(fp, clientmodel.Now().Add(-s.dropAfter))
		}
	}
	// Wait until both channels are closed.
	for range memoryFingerprints {
	}
	for range archivedFingerprints {
	}
}
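
The early-checkpoint logic in the loop above relies on resetting the checkpoint timer to zero, which makes its channel fire on the next select iteration. A stripped-down sketch of that mechanism, with illustrative names and durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	const checkpointInterval = 50 * time.Millisecond
	const dirtySeriesLimit = 3

	checkpointTimer := time.NewTimer(checkpointInterval)
	defer checkpointTimer.Stop()

	work := make(chan struct{})
	go func() {
		defer close(work)
		for i := 0; i < 5; i++ {
			work <- struct{}{}
		}
	}()

	dirty := 0
	for {
		select {
		case <-checkpointTimer.C:
			fmt.Println("checkpoint, dirty series:", dirty)
			dirty = 0
			checkpointTimer.Reset(checkpointInterval)
		case _, ok := <-work:
			if !ok {
				return
			}
			dirty++
			if dirty >= dirtySeriesLimit {
				// Force the checkpoint case to run on the next iteration.
				checkpointTimer.Reset(0)
			}
		}
	}
}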
Example #3
0
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
	text, err := h.getTemplate(name)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	tmpl := template.NewTemplateExpander(text, name, data, clientmodel.Now(), h.queryEngine, h.options.PathPrefix)
	tmpl.Funcs(template_text.FuncMap{
		"since":       time.Since,
		"getConsoles": h.getConsoles,
		"pathPrefix":  func() string { return h.options.PathPrefix },
		"stripLabels": func(lset clientmodel.LabelSet, labels ...clientmodel.LabelName) clientmodel.LabelSet {
			for _, ln := range labels {
				delete(lset, ln)
			}
			return lset
		},
		"globalURL": func(u *url.URL) *url.URL {
			for _, lhr := range localhostRepresentations {
				if strings.HasPrefix(u.Host, lhr+":") {
					u.Host = strings.Replace(u.Host, lhr+":", h.options.Hostname+":", 1)
				}
			}
			return u
		},
		"healthToClass": func(th retrieval.TargetHealth) string {
			switch th {
			case retrieval.HealthUnknown:
				return "warning"
			case retrieval.HealthGood:
				return "success"
			default:
				return "danger"
			}
		},
		"alertStateToClass": func(as rules.AlertState) string {
			switch as {
			case rules.StateInactive:
				return "success"
			case rules.StatePending:
				return "warning"
			case rules.StateFiring:
				return "danger"
			default:
				panic("unknown alert state")
			}
		},
	})

	result, err := tmpl.ExpandHTML(nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	io.WriteString(w, result)
}
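
Example #3 wires helpers such as healthToClass and stripLabels into the expander through a template function map. A hedged, standalone sketch of that mechanism using only the standard text/template package (the status strings below are placeholders, not the retrieval package's constants):

package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"healthToClass": func(health string) string {
			switch health {
			case "unknown":
				return "warning"
			case "good":
				return "success"
			default:
				return "danger"
			}
		},
	}
	// Funcs must be registered before Parse so the parser knows the names.
	tmpl := template.Must(
		template.New("status").Funcs(funcs).Parse("target is {{ healthToClass .Health }}\n"),
	)
	if err := tmpl.Execute(os.Stdout, struct{ Health string }{Health: "good"}); err != nil {
		panic(err)
	}
}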
Example #4
0
func TestDeltaWithEmptyElementDoesNotCrash(t *testing.T) {
	now := clientmodel.Now()
	vector := deltaImpl(now, nil, []Node{emptyRangeNode{}, &ScalarLiteral{value: 0}}).(Vector)
	if len(vector) != 0 {
		t.Fatalf("Expected empty result vector, got: %v", vector)
	}
	vector = deltaImpl(now, nil, []Node{emptyRangeNode{}, &ScalarLiteral{value: 1}}).(Vector)
	if len(vector) != 0 {
		t.Fatalf("Expected empty result vector, got: %v", vector)
	}
}
Example #5
0
func (m *ruleManager) runIteration(results chan<- clientmodel.Samples) {
	now := clientmodel.Now()
	wg := sync.WaitGroup{}

	m.Lock()
	rulesSnapshot := make([]rules.Rule, len(m.rules))
	copy(rulesSnapshot, m.rules)
	m.Unlock()

	for _, rule := range rulesSnapshot {
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule rules.Rule) {
			defer wg.Done()

			start := time.Now()
			vector, err := rule.Eval(now, m.storage)
			duration := time.Since(start)

			samples := make(clientmodel.Samples, len(vector))
			for i, s := range vector {
				samples[i] = &clientmodel.Sample{
					Metric:    s.Metric.Metric,
					Value:     s.Value,
					Timestamp: s.Timestamp,
				}
			}

			if err != nil {
				evalFailures.Inc()
				glog.Warningf("Error while evaluating rule %q: %s", rule, err)
			} else {
				m.results <- samples
			}

			switch r := rule.(type) {
			case *rules.AlertingRule:
				m.queueAlertNotifications(r, now)
				evalDuration.WithLabelValues(alertingRuleType).Observe(
					float64(duration / time.Millisecond),
				)
			case *rules.RecordingRule:
				evalDuration.WithLabelValues(recordingRuleType).Observe(
					float64(duration / time.Millisecond),
				)
			default:
				panic(fmt.Sprintf("Unknown rule type: %T", rule))
			}
		}(rule)
	}

	wg.Wait()
}
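
Example #5 (and the later variants in Examples #7 and #15) shares one shape: copy the rule slice while holding the lock, then evaluate every rule in its own goroutine and wait on a sync.WaitGroup. A minimal sketch of that fan-out, with rules reduced to plain strings:

package main

import (
	"fmt"
	"sync"
)

type manager struct {
	mtx   sync.Mutex
	rules []string
}

func (m *manager) runIteration() {
	// Snapshot the rules under the lock so evaluation can proceed without it.
	m.mtx.Lock()
	snapshot := make([]string, len(m.rules))
	copy(snapshot, m.rules)
	m.mtx.Unlock()

	var wg sync.WaitGroup
	for _, rule := range snapshot {
		wg.Add(1)
		go func(rule string) {
			defer wg.Done()
			fmt.Println("evaluating", rule)
		}(rule)
	}
	wg.Wait()
}

func main() {
	m := &manager{rules: []string{"alert:HighErrors", "record:job:errors:rate5m"}}
	m.runIteration()
}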
Example #6
0
func (t *target) scrape(ingester extraction.Ingester) (err error) {
	timestamp := clientmodel.Now()
	defer func(start time.Time) {
		t.Lock() // Writing t.state and t.lastError requires the lock.
		if err == nil {
			t.state = Alive
		} else {
			t.state = Unreachable
		}
		t.lastError = err
		t.Unlock()
		t.recordScrapeHealth(ingester, timestamp, err == nil, time.Since(start))
	}(time.Now())

	req, err := http.NewRequest("GET", t.URL(), nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Accept", acceptHeader)

	resp, err := t.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned HTTP status %s", resp.Status)
	}

	processor, err := extraction.ProcessorForRequestHeader(resp.Header)
	if err != nil {
		return err
	}

	baseLabels := clientmodel.LabelSet{InstanceLabel: clientmodel.LabelValue(t.URL())}
	for baseLabel, baseValue := range t.baseLabels {
		baseLabels[baseLabel] = baseValue
	}

	i := &MergeLabelsIngester{
		Labels:          baseLabels,
		CollisionPrefix: clientmodel.ExporterLabelPrefix,

		Ingester: ingester,
	}
	processOptions := &extraction.ProcessOptions{
		Timestamp: timestamp,
	}
	return processor.ProcessSingle(resp.Body, i, processOptions)
}
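
The scrape method above depends on its named err return: the deferred closure inspects err after the body has returned, so health is recorded exactly once for every exit path. A small illustrative sketch of that idiom:

package main

import (
	"errors"
	"fmt"
	"time"
)

// doScrape mirrors the shape of target.scrape above: because err is a named
// return value, the deferred closure sees the final error regardless of
// which return path was taken.
func doScrape(fail bool) (err error) {
	defer func(start time.Time) {
		fmt.Printf("healthy=%v duration=%v\n", err == nil, time.Since(start))
	}(time.Now())

	if fail {
		return errors.New("server returned HTTP status 500 Internal Server Error")
	}
	return nil
}

func main() {
	doScrape(false)
	doScrape(true)
}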
Example #7
0
func (m *Manager) runIteration() {
	now := clientmodel.Now()
	wg := sync.WaitGroup{}

	m.Lock()
	rulesSnapshot := make([]Rule, len(m.rules))
	copy(rulesSnapshot, m.rules)
	m.Unlock()

	for _, rule := range rulesSnapshot {
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()

			start := time.Now()
			vector, err := rule.eval(now, m.queryEngine)
			duration := time.Since(start)

			if err != nil {
				evalFailures.Inc()
				log.Warnf("Error while evaluating rule %q: %s", rule, err)
				return
			}

			switch r := rule.(type) {
			case *AlertingRule:
				m.queueAlertNotifications(r, now)
				evalDuration.WithLabelValues(ruleTypeAlerting).Observe(
					float64(duration / time.Millisecond),
				)
			case *RecordingRule:
				evalDuration.WithLabelValues(ruleTypeRecording).Observe(
					float64(duration / time.Millisecond),
				)
			default:
				panic(fmt.Errorf("Unknown rule type: %T", rule))
			}

			for _, s := range vector {
				m.sampleAppender.Append(&clientmodel.Sample{
					Metric:    s.Metric.Metric,
					Value:     s.Value,
					Timestamp: s.Timestamp,
				})
			}
		}(rule)
	}
	wg.Wait()
}
Example #8
0
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
	ctx := route.Context(r)
	name := route.Param(ctx, "filepath")

	file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	text, err := ioutil.ReadAll(file)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Provide URL parameters as a map for easy use. Advanced users may have need for
	// parameters beyond the first, so provide RawParams.
	rawParams, err := url.ParseQuery(r.URL.RawQuery)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	params := map[string]string{}
	for k, v := range rawParams {
		params[k] = v[0]
	}
	data := struct {
		RawParams url.Values
		Params    map[string]string
		Path      string
	}{
		RawParams: rawParams,
		Params:    params,
		Path:      name,
	}

	tmpl := template.NewTemplateExpander(string(text), "__console_"+name, data, clientmodel.Now(), h.queryEngine, h.options.PathPrefix)
	filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	result, err := tmpl.ExpandHTML(filenames)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	io.WriteString(w, result)
}
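
The consoles handler flattens url.Values into a plain map, keeping only the first value per key for convenient template access while still exposing RawParams for repeated parameters. A self-contained sketch of the difference:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	rawParams, err := url.ParseQuery("job=api&instance=a&instance=b")
	if err != nil {
		panic(err)
	}
	// Keep only the first value per key, as the handler above does.
	params := map[string]string{}
	for k, v := range rawParams {
		params[k] = v[0]
	}
	fmt.Println(params["instance"])    // a
	fmt.Println(rawParams["instance"]) // [a b]
}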
Example #9
0
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
	text, err := h.getTemplate(name)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	tmpl := template.NewTemplateExpander(text, name, data, clientmodel.Now(), h.queryEngine, h.options.ExternalURL.Path)
	tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))

	result, err := tmpl.ExpandHTML(nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	io.WriteString(w, result)
}
Example #10
0
func TestTargetRecordScrapeHealth(t *testing.T) {
	testTarget := target{
		url:        "http://example.url",
		baseLabels: clientmodel.LabelSet{clientmodel.JobLabel: "testjob"},
		httpClient: utility.NewDeadlineClient(0),
	}

	now := clientmodel.Now()
	ingester := &collectResultIngester{}
	testTarget.recordScrapeHealth(ingester, now, true, 2*time.Second)

	result := ingester.result

	if len(result) != 2 {
		t.Fatalf("Expected two samples, got %d", len(result))
	}

	actual := result[0]
	expected := &clientmodel.Sample{
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: scrapeHealthMetricName,
			InstanceLabel:               "http://example.url",
			clientmodel.JobLabel:        "testjob",
		},
		Timestamp: now,
		Value:     1,
	}

	if !actual.Equal(expected) {
		t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
	}

	actual = result[1]
	expected = &clientmodel.Sample{
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: scrapeDurationMetricName,
			InstanceLabel:               "http://example.url",
			clientmodel.JobLabel:        "testjob",
		},
		Timestamp: now,
		Value:     2.0,
	}

	if !actual.Equal(expected) {
		t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
	}
}
Example #11
0
// NewIterator implements Storage.
func (s *memorySeriesStorage) NewIterator(fp clientmodel.Fingerprint) SeriesIterator {
	s.fpLocker.Lock(fp)
	defer s.fpLocker.Unlock(fp)

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		// Oops, no series for fp found. That happens if, after
		// preloading is done, the whole series is identified as old
		// enough for purging and hence purged for good. As there is no
		// data left to iterate over, return an iterator that will never
		// return any values.
		return nopSeriesIterator{}
	}
	return &boundedIterator{
		it:    series.newIterator(),
		start: clientmodel.Now().Add(-s.dropAfter),
	}
}
Example #12
0
func (t *target) Scrape(earliest time.Time, ingester extraction.Ingester) error {
	now := clientmodel.Now()
	futureState := t.state
	err := t.scrape(now, ingester)
	if err != nil {
		t.recordScrapeHealth(ingester, now, false)
		futureState = UNREACHABLE
	} else {
		t.recordScrapeHealth(ingester, now, true)
		futureState = ALIVE
	}

	t.scheduler.Reschedule(earliest, futureState)
	t.state = futureState
	t.lastError = err

	return err
}
Example #13
0
func TestTargetRecordScrapeHealth(t *testing.T) {
	testTarget := newTestTarget("example.url:80", 0, clientmodel.LabelSet{clientmodel.JobLabel: "testjob"})

	now := clientmodel.Now()
	appender := &collectResultAppender{}
	testTarget.status.setLastError(nil)
	recordScrapeHealth(appender, now, testTarget.BaseLabels(), testTarget.status.Health(), 2*time.Second)

	result := appender.result

	if len(result) != 2 {
		t.Fatalf("Expected two samples, got %d", len(result))
	}

	actual := result[0]
	expected := &clientmodel.Sample{
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: scrapeHealthMetricName,
			clientmodel.InstanceLabel:   "example.url:80",
			clientmodel.JobLabel:        "testjob",
		},
		Timestamp: now,
		Value:     1,
	}

	if !actual.Equal(expected) {
		t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
	}

	actual = result[1]
	expected = &clientmodel.Sample{
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: scrapeDurationMetricName,
			clientmodel.InstanceLabel:   "example.url:80",
			clientmodel.JobLabel:        "testjob",
		},
		Timestamp: now,
		Value:     2.0,
	}

	if !actual.Equal(expected) {
		t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
	}
}
Example #14
0
func (t *TieredStorage) flushMemory(ttl time.Duration) {
	flushOlderThan := clientmodel.Now().Add(-1 * ttl)

	glog.Info("Flushing samples to disk...")
	t.memoryArena.Flush(flushOlderThan, t.appendToDiskQueue)

	queueLength := len(t.appendToDiskQueue)
	if queueLength > 0 {
		samples := clientmodel.Samples{}
		for i := 0; i < queueLength; i++ {
			chunk := <-t.appendToDiskQueue
			samples = append(samples, chunk...)
		}

		glog.Infof("Writing %d samples...", len(samples))
		t.DiskStorage.AppendSamples(samples)
	}

	glog.Info("Done flushing.")
}
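
flushMemory drains its buffered queue by first taking len() as a snapshot of how many chunks are pending and then receiving exactly that many, concatenating them into one batch. A toy sketch of the idiom:

package main

import "fmt"

func main() {
	queue := make(chan []int, 4)
	queue <- []int{1, 2}
	queue <- []int{3}

	// Snapshot the queue length, then drain exactly that many chunks and
	// concatenate them, as flushMemory does with appendToDiskQueue above.
	var all []int
	for n := len(queue); n > 0; n-- {
		all = append(all, <-queue...)
	}
	fmt.Println(all) // [1 2 3]
}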
Example #15
0
func (m *ruleManager) runIteration(results chan<- *extraction.Result) {
	now := clientmodel.Now()
	wg := sync.WaitGroup{}

	m.Lock()
	rules := make([]Rule, len(m.rules))
	copy(rules, m.rules)
	m.Unlock()

	for _, rule := range rules {
		wg.Add(1)
		// BUG(julius): Look at fixing thundering herd.
		go func(rule Rule) {
			defer wg.Done()

			start := time.Now()
			vector, err := rule.Eval(now, m.storage)
			duration := time.Since(start)

			samples := make(clientmodel.Samples, len(vector))
			copy(samples, vector)
			m.results <- &extraction.Result{
				Samples: samples,
				Err:     err,
			}

			switch r := rule.(type) {
			case *AlertingRule:
				m.queueAlertNotifications(r)
				recordOutcome(alertingRuleType, duration)
			case *RecordingRule:
				recordOutcome(recordingRuleType, duration)
			default:
				panic(fmt.Sprintf("Unknown rule type: %T", rule))
			}
		}(rule)
	}

	wg.Wait()
}
Example #16
0
func TestGetMetricForFingerprintCachesCopyOfMetric(t *testing.T) {
	ts, closer := NewTestTieredStorage(t)
	defer closer.Close()

	m := clientmodel.Metric{
		clientmodel.MetricNameLabel: "testmetric",
	}
	samples := clientmodel.Samples{
		&clientmodel.Sample{
			Metric:    m,
			Value:     0,
			Timestamp: clientmodel.Now(),
		},
	}

	if err := ts.AppendSamples(samples); err != nil {
		t.Fatal(err)
	}

	ts.Flush()

	fp := &clientmodel.Fingerprint{}
	fp.LoadFromMetric(m)
	m, err := ts.GetMetricForFingerprint(fp)
	if err != nil {
		t.Fatal(err)
	}

	m[clientmodel.MetricNameLabel] = "changedmetric"

	m, err = ts.GetMetricForFingerprint(fp)
	if err != nil {
		t.Fatal(err)
	}
	if m[clientmodel.MetricNameLabel] != "testmetric" {
		t.Fatal("Metric name label value has changed: ", m[clientmodel.MetricNameLabel])
	}
}
Example #17
0
// cycleThroughArchivedFingerprints returns a channel that emits fingerprints
// for archived series in a throttled fashion. It continues to cycle through all
// archived fingerprints until s.loopStopping is closed.
func (s *memorySeriesStorage) cycleThroughArchivedFingerprints() chan clientmodel.Fingerprint {
	archivedFingerprints := make(chan clientmodel.Fingerprint)
	go func() {
		defer close(archivedFingerprints)

		for {
			archivedFPs, err := s.persistence.fingerprintsModifiedBefore(
				clientmodel.Now().Add(-s.dropAfter),
			)
			if err != nil {
				log.Error("Failed to lookup archived fingerprint ranges: ", err)
				s.waitForNextFP(0, 1)
				continue
			}
			// Initial wait, also important if there are no FPs yet.
			if !s.waitForNextFP(len(archivedFPs), 1) {
				return
			}
			begin := time.Now()
			for _, fp := range archivedFPs {
				select {
				case archivedFingerprints <- fp:
				case <-s.loopStopping:
					return
				}
				// Never speed up maintenance of archived FPs.
				s.waitForNextFP(len(archivedFPs), 1)
			}
			if len(archivedFPs) > 0 {
				log.Infof(
					"Completed maintenance sweep through %d archived fingerprints in %v.",
					len(archivedFPs), time.Since(begin),
				)
			}
		}
	}()
	return archivedFingerprints
}
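
cycleThroughArchivedFingerprints follows a common producer pattern: a goroutine owns the output channel, every send is raced against the stop channel so shutdown never blocks, and pacing is inserted between sends. A hedged sketch with the persistence lookup and waitForNextFP replaced by stand-ins:

package main

import (
	"fmt"
	"time"
)

// cycleFingerprints endlessly cycles through a fixed set of "fingerprints",
// pausing between sends and stopping when stop is closed. The time.Sleep
// stands in for the real throttling logic.
func cycleFingerprints(stop <-chan struct{}) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		for {
			for fp := 0; fp < 3; fp++ {
				select {
				case out <- fp:
				case <-stop:
					return
				}
				time.Sleep(5 * time.Millisecond) // throttle
			}
		}
	}()
	return out
}

func main() {
	stop := make(chan struct{})
	fps := cycleFingerprints(stop)
	for i := 0; i < 5; i++ {
		fmt.Println(<-fps)
	}
	close(stop)
	for range fps { // wait until the producer has closed the channel
	}
}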
Example #18
0
func TestTargetRecordScrapeHealth(t *testing.T) {
	testTarget := target{
		scheduler:  literalScheduler{},
		address:    "http://example.url",
		baseLabels: clientmodel.LabelSet{clientmodel.JobLabel: "testjob"},
		httpClient: utility.NewDeadlineClient(0),
	}

	now := clientmodel.Now()
	ingester := &collectResultIngester{}
	testTarget.recordScrapeHealth(ingester, now, true)

	result := ingester.result

	if len(result.Samples) != 1 {
		t.Fatalf("Expected one sample, got %d", len(result.Samples))
	}

	actual := result.Samples[0]
	expected := &clientmodel.Sample{
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: ScrapeHealthMetricName,
			InstanceLabel:               "http://example.url",
			clientmodel.JobLabel:        "testjob",
		},
		Timestamp: now,
		Value:     1,
	}

	if result.Err != nil {
		t.Fatalf("Got unexpected error: %v", result.Err)
	}

	if !actual.Equal(expected) {
		t.Fatalf("Expected and actual samples not equal. Expected: %v, actual: %v", expected, actual)
	}
}
Example #19
0
func GetFingerprintsForLabelSetUsesAndForLabelMatchingTests(p metric.Persistence, t test.Tester) {
	metrics := []clientmodel.LabelSet{
		{clientmodel.MetricNameLabel: "request_metrics_latency_equal_tallying_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "requests_metrics_latency_equal_accumulating_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "requests_metrics_latency_logarithmic_accumulating_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "requests_metrics_latency_logarithmic_tallying_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "targets_healthy_scrape_latency_ms", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
	}

	for _, metric := range metrics {
		m := clientmodel.Metric{}

		for k, v := range metric {
			m[clientmodel.LabelName(k)] = clientmodel.LabelValue(v)
		}

		testAppendSamples(p, &clientmodel.Sample{
			Value:     clientmodel.SampleValue(0.0),
			Timestamp: clientmodel.Now(),
			Metric:    m,
		}, t)
	}

	labelSet := clientmodel.LabelSet{
		clientmodel.MetricNameLabel: "targets_healthy_scrape_latency_ms",
		"percentile":                "0.010000",
	}

	fingerprints, err := p.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(labelSet))
	if err != nil {
		t.Errorf("could not get labels: %s", err)
	}

	if len(fingerprints) != 1 {
		t.Errorf("did not get a single metric as is expected, got %s", fingerprints)
	}
}
Example #20
0
package extraction

import (
	"bytes"
	"errors"
	"io/ioutil"
	"os"
	"path"
	"runtime"
	"sort"
	"testing"

	"github.com/prometheus/client_golang/model"
)

var test002Time = model.Now()

type testProcessor002ProcessScenario struct {
	in               string
	expected, actual []model.Samples
	err              error
}

func (s *testProcessor002ProcessScenario) Ingest(samples model.Samples) error {
	s.actual = append(s.actual, samples)
	return nil
}

func (s *testProcessor002ProcessScenario) test(t testing.TB, set int) {
	reader, err := os.Open(path.Join("fixtures", s.in))
	if err != nil {
Example #21
0
func TestTruncateBefore(t *testing.T) {
	type in struct {
		values metric.Values
		time   clientmodel.Timestamp
	}
	instant := clientmodel.Now()
	var scenarios = []struct {
		in  in
		out metric.Values
	}{
		{
			in: in{
				time: instant,
				values: metric.Values{
					{
						Value:     0,
						Timestamp: instant,
					},
					{
						Value:     1,
						Timestamp: instant.Add(time.Second),
					},
					{
						Value:     2,
						Timestamp: instant.Add(2 * time.Second),
					},
					{
						Value:     3,
						Timestamp: instant.Add(3 * time.Second),
					},
					{
						Value:     4,
						Timestamp: instant.Add(4 * time.Second),
					},
				},
			},
			out: metric.Values{
				{
					Value:     0,
					Timestamp: instant,
				},
				{
					Value:     1,
					Timestamp: instant.Add(time.Second),
				},
				{
					Value:     2,
					Timestamp: instant.Add(2 * time.Second),
				},
				{
					Value:     3,
					Timestamp: instant.Add(3 * time.Second),
				},
				{
					Value:     4,
					Timestamp: instant.Add(4 * time.Second),
				},
			},
		},
		{
			in: in{
				time: instant.Add(2 * time.Second),
				values: metric.Values{
					{
						Value:     0,
						Timestamp: instant,
					},
					{
						Value:     1,
						Timestamp: instant.Add(time.Second),
					},
					{
						Value:     2,
						Timestamp: instant.Add(2 * time.Second),
					},
					{
						Value:     3,
						Timestamp: instant.Add(3 * time.Second),
					},
					{
						Value:     4,
						Timestamp: instant.Add(4 * time.Second),
					},
				},
			},
			out: metric.Values{
				{
					Value:     1,
					Timestamp: instant.Add(time.Second),
				},
				{
					Value:     2,
					Timestamp: instant.Add(2 * time.Second),
				},
				{
					Value:     3,
					Timestamp: instant.Add(3 * time.Second),
				},
				{
					Value:     4,
					Timestamp: instant.Add(4 * time.Second),
				},
			},
		},
		{
			in: in{
				time: instant.Add(5 * time.Second),
				values: metric.Values{
					{
						Value:     0,
						Timestamp: instant,
					},
					{
						Value:     1,
						Timestamp: instant.Add(time.Second),
					},
					{
						Value:     2,
						Timestamp: instant.Add(2 * time.Second),
					},
					{
						Value:     3,
						Timestamp: instant.Add(3 * time.Second),
					},
					{
						Value:     4,
						Timestamp: instant.Add(4 * time.Second),
					},
				},
			},
			out: metric.Values{
				// Preserve the last value in case it needs to be used for the next set.
				{
					Value:     4,
					Timestamp: instant.Add(4 * time.Second),
				},
			},
		},
	}

	for i, scenario := range scenarios {
		actual := chunk(scenario.in.values).TruncateBefore(scenario.in.time)

		if len(actual) != len(scenario.out) {
			t.Fatalf("%d. expected length of %d, got %d", i, len(scenario.out), len(actual))
		}

		for j, actualValue := range actual {
			if !actualValue.Equal(&scenario.out[j]) {
				t.Fatalf("%d.%d. expected %s, got %s", i, j, scenario.out[j], actualValue)
			}
		}
	}
}
Example #22
0
package extraction

import (
	"errors"
	"os"
	"path"
	"sort"
	"testing"

	"github.com/prometheus/client_golang/model"
)

var test001Time = model.Now()

type testProcessor001ProcessScenario struct {
	in               string
	expected, actual []model.Samples
	err              error
}

func (s *testProcessor001ProcessScenario) Ingest(samples model.Samples) error {
	s.actual = append(s.actual, samples)
	return nil
}

func (s *testProcessor001ProcessScenario) test(t testing.TB, set int) {
	reader, err := os.Open(path.Join("fixtures", s.in))
	if err != nil {
Example #23
0
package extraction

import (
	"sort"
	"strings"
	"testing"

	"github.com/prometheus/client_golang/model"
)

var testTime = model.Now()

type metricFamilyProcessorScenario struct {
	in               string
	expected, actual []model.Samples
}

func (s *metricFamilyProcessorScenario) Ingest(samples model.Samples) error {
	s.actual = append(s.actual, samples)
	return nil
}

func (s *metricFamilyProcessorScenario) test(t *testing.T, set int) {
	i := strings.NewReader(s.in)

	o := &ProcessOptions{
Example #24
0
func createRandomSamples(metricName string, minLen int) clientmodel.Samples {
	type valueCreator func() clientmodel.SampleValue
	type deltaApplier func(clientmodel.SampleValue) clientmodel.SampleValue

	var (
		maxMetrics         = 5
		maxStreakLength    = 500
		maxTimeDelta       = 10000
		maxTimeDeltaFactor = 10
		timestamp          = clientmodel.Now() - clientmodel.Timestamp(maxTimeDelta*maxTimeDeltaFactor*minLen/4) // So that some timestamps are in the future.
		generators         = []struct {
			createValue valueCreator
			applyDelta  []deltaApplier
		}{
			{ // "Boolean".
				createValue: func() clientmodel.SampleValue {
					return clientmodel.SampleValue(rand.Intn(2))
				},
				applyDelta: []deltaApplier{
					func(_ clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Intn(2))
					},
				},
			},
			{ // Integer with int deltas of various byte length.
				createValue: func() clientmodel.SampleValue {
					return clientmodel.SampleValue(rand.Int63() - 1<<62)
				},
				applyDelta: []deltaApplier{
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Intn(1<<8) - 1<<7 + int(v))
					},
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Intn(1<<16) - 1<<15 + int(v))
					},
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return clientmodel.SampleValue(rand.Int63n(1<<32) - 1<<31 + int64(v))
					},
				},
			},
			{ // Float with float32 and float64 deltas.
				createValue: func() clientmodel.SampleValue {
					return clientmodel.SampleValue(rand.NormFloat64())
				},
				applyDelta: []deltaApplier{
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return v + clientmodel.SampleValue(float32(rand.NormFloat64()))
					},
					func(v clientmodel.SampleValue) clientmodel.SampleValue {
						return v + clientmodel.SampleValue(rand.NormFloat64())
					},
				},
			},
		}
	)

	// Prefill result with two samples with colliding metrics (to test fingerprint mapping).
	result := clientmodel.Samples{
		&clientmodel.Sample{
			Metric: clientmodel.Metric{
				"instance": "ip-10-33-84-73.l05.ams5.s-cloud.net:24483",
				"status":   "503",
			},
			Value:     42,
			Timestamp: timestamp,
		},
		&clientmodel.Sample{
			Metric: clientmodel.Metric{
				"instance": "ip-10-33-84-73.l05.ams5.s-cloud.net:24480",
				"status":   "500",
			},
			Value:     2010,
			Timestamp: timestamp + 1,
		},
	}

	metrics := []clientmodel.Metric{}
	for n := rand.Intn(maxMetrics); n >= 0; n-- {
		metrics = append(metrics, clientmodel.Metric{
			clientmodel.MetricNameLabel:                             clientmodel.LabelValue(metricName),
			clientmodel.LabelName(fmt.Sprintf("labelname_%d", n+1)): clientmodel.LabelValue(fmt.Sprintf("labelvalue_%d", rand.Int())),
		})
	}

	for len(result) < minLen {
		// Pick a metric for this cycle.
		metric := metrics[rand.Intn(len(metrics))]
		timeDelta := rand.Intn(maxTimeDelta) + 1
		generator := generators[rand.Intn(len(generators))]
		createValue := generator.createValue
		applyDelta := generator.applyDelta[rand.Intn(len(generator.applyDelta))]
		incTimestamp := func() { timestamp += clientmodel.Timestamp(timeDelta * (rand.Intn(maxTimeDeltaFactor) + 1)) }
		switch rand.Intn(4) {
		case 0: // A single sample.
			result = append(result, &clientmodel.Sample{
				Metric:    metric,
				Value:     createValue(),
				Timestamp: timestamp,
			})
			incTimestamp()
		case 1: // A streak of random sample values.
			for n := rand.Intn(maxStreakLength); n >= 0; n-- {
				result = append(result, &clientmodel.Sample{
					Metric:    metric,
					Value:     createValue(),
					Timestamp: timestamp,
				})
				incTimestamp()
			}
		case 2: // A streak of sample values with incremental changes.
			value := createValue()
			for n := rand.Intn(maxStreakLength); n >= 0; n-- {
				result = append(result, &clientmodel.Sample{
					Metric:    metric,
					Value:     value,
					Timestamp: timestamp,
				})
				incTimestamp()
				value = applyDelta(value)
			}
		case 3: // A streak of constant sample values.
			value := createValue()
			for n := rand.Intn(maxStreakLength); n >= 0; n-- {
				result = append(result, &clientmodel.Sample{
					Metric:    metric,
					Value:     value,
					Timestamp: timestamp,
				})
				incTimestamp()
			}
		}
	}

	return result
}
Example #25
0
func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
	setAccessControlHeaders(w)
	w.Header().Set("Content-Type", "application/json")

	params := http_utils.GetQueryParams(r)
	expr := params.Get("expr")
	end, _ := strconv.ParseInt(params.Get("end"), 0, 64)
	duration, _ := strconv.ParseInt(params.Get("range"), 0, 64)
	step, _ := strconv.ParseInt(params.Get("step"), 0, 64)

	exprNode, err := rules.LoadExprFromString(expr)
	if err != nil {
		fmt.Fprint(w, ast.ErrorToJSON(err))
		return
	}
	if exprNode.Type() != ast.VECTOR {
		fmt.Fprint(w, ast.ErrorToJSON(errors.New("Expression does not evaluate to vector type")))
		return
	}

	if end == 0 {
		end = clientmodel.Now().Unix()
	}

	if step < 1 {
		step = 1
	}

	if end-duration < 0 {
		duration = end
	}

	// Align the start to step "tick" boundary.
	end -= end % step

	queryStats := stats.NewTimerGroup()

	evalTimer := queryStats.GetTimer(stats.TotalEvalTime).Start()
	matrix, err := ast.EvalVectorRange(
		exprNode.(ast.VectorNode),
		clientmodel.TimestampFromUnix(end-duration),
		clientmodel.TimestampFromUnix(end),
		time.Duration(step)*time.Second,
		serv.Storage,
		queryStats)
	if err != nil {
		fmt.Fprint(w, ast.ErrorToJSON(err))
		return
	}
	evalTimer.Stop()

	sortTimer := queryStats.GetTimer(stats.ResultSortTime).Start()
	sort.Sort(matrix)
	sortTimer.Stop()

	jsonTimer := queryStats.GetTimer(stats.JsonEncodeTime).Start()
	result := ast.TypedValueToJSON(matrix, "matrix")
	jsonTimer.Stop()

	glog.Infof("Range query: %s\nQuery stats:\n%s\n", expr, queryStats)
	fmt.Fprint(w, result)
}
Example #26
0
// QueryRange handles the /api/query_range endpoint.
func (serv MetricsService) QueryRange(w http.ResponseWriter, r *http.Request) {
	setAccessControlHeaders(w)
	w.Header().Set("Content-Type", "application/json")

	params := httputils.GetQueryParams(r)
	expr := params.Get("expr")

	// Input times and durations are in seconds and get converted to nanoseconds.
	endFloat, _ := strconv.ParseFloat(params.Get("end"), 64)
	durationFloat, _ := strconv.ParseFloat(params.Get("range"), 64)
	stepFloat, _ := strconv.ParseFloat(params.Get("step"), 64)
	nanosPerSecond := int64(time.Second / time.Nanosecond)
	end := int64(endFloat) * nanosPerSecond
	duration := int64(durationFloat) * nanosPerSecond
	step := int64(stepFloat) * nanosPerSecond

	exprNode, err := rules.LoadExprFromString(expr)
	if err != nil {
		fmt.Fprint(w, ast.ErrorToJSON(err))
		return
	}
	if exprNode.Type() != ast.VectorType {
		fmt.Fprint(w, ast.ErrorToJSON(errors.New("expression does not evaluate to vector type")))
		return
	}

	if end == 0 {
		end = clientmodel.Now().UnixNano()
	}

	if step <= 0 {
		step = nanosPerSecond
	}

	if end-duration < 0 {
		duration = end
	}

	// For safety, limit the number of returned points per timeseries.
	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
	if duration/step > 11000 {
		fmt.Fprint(w, ast.ErrorToJSON(errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")))
		return
	}

	// Align the start to step "tick" boundary.
	end -= end % step

	queryStats := stats.NewTimerGroup()

	evalTimer := queryStats.GetTimer(stats.TotalEvalTime).Start()
	matrix, err := ast.EvalVectorRange(
		exprNode.(ast.VectorNode),
		clientmodel.TimestampFromUnixNano(end-duration),
		clientmodel.TimestampFromUnixNano(end),
		time.Duration(step),
		serv.Storage,
		queryStats)
	if err != nil {
		fmt.Fprint(w, ast.ErrorToJSON(err))
		return
	}
	evalTimer.Stop()

	sortTimer := queryStats.GetTimer(stats.ResultSortTime).Start()
	sort.Sort(matrix)
	sortTimer.Stop()

	jsonTimer := queryStats.GetTimer(stats.JSONEncodeTime).Start()
	result := ast.TypedValueToJSON(matrix, "matrix")
	jsonTimer.Stop()

	glog.V(1).Infof("Range query: %s\nQuery stats:\n%s\n", expr, queryStats)
	fmt.Fprint(w, result)
}
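
Both QueryRange variants snap the end timestamp down to a multiple of the step ("tick" alignment) so repeated queries evaluate at stable timestamps, and the later variant additionally bounds duration/step. A worked sketch with made-up numbers:

package main

import "fmt"

func main() {
	var (
		end      = int64(1007) // seconds since the epoch, made up
		duration = int64(600)  // query range in seconds
		step     = int64(15)   // resolution step in seconds
	)

	// Align the end to a step "tick" boundary, as both handlers above do.
	end -= end % step // 1007 -> 1005

	// The resolution guard in Example #26 bounds the number of points.
	points := duration/step + 1
	fmt.Println(end, points, points <= 11000) // 1005 41 true
}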
Example #27
0
func testEvictAndLoadChunkDescs(t *testing.T, encoding chunkEncoding) {
	samples := make(clientmodel.Samples, 10000)
	for i := range samples {
		samples[i] = &clientmodel.Sample{
			Timestamp: clientmodel.Timestamp(2 * i),
			Value:     clientmodel.SampleValue(float64(i * i)),
		}
	}
	// Give last sample a timestamp of now so that the head chunk will not
	// be closed (which would then archive the time series later as
	// everything will get evicted).
	samples[len(samples)-1] = &clientmodel.Sample{
		Timestamp: clientmodel.Now(),
		Value:     clientmodel.SampleValue(3.14),
	}

	s, closer := NewTestStorage(t, encoding)
	defer closer.Close()

	// Adjust memory chunks to lower value to see evictions.
	s.maxMemoryChunks = 1

	for _, sample := range samples {
		s.Append(sample)
	}
	s.WaitForIndexing()

	fp := clientmodel.Metric{}.FastFingerprint()

	series, ok := s.fpToSeries.get(fp)
	if !ok {
		t.Fatal("could not find series")
	}

	oldLen := len(series.chunkDescs)
	// Maintain series without any dropped chunks.
	s.maintainMemorySeries(fp, 0)
	// Give the evict goroutine an opportunity to run.
	time.Sleep(10 * time.Millisecond)
	// Maintain series again to trigger chunkDesc eviction
	s.maintainMemorySeries(fp, 0)

	if oldLen <= len(series.chunkDescs) {
		t.Errorf("Expected number of chunkDescs to decrease, old number %d, current number %d.", oldLen, len(series.chunkDescs))
	}

	// Load everything back.
	p := s.NewPreloader()
	p.PreloadRange(fp, 0, 100000, time.Hour)

	if oldLen != len(series.chunkDescs) {
		t.Errorf("Expected number of chunkDescs to have reached old value again, old number %d, current number %d.", oldLen, len(series.chunkDescs))
	}

	p.Close()

	// Now maintain series with drops to make sure nothing crazy happens.
	s.maintainMemorySeries(fp, 100000)

	if len(series.chunkDescs) != 1 {
		t.Errorf("Expected exactly one chunkDesc left, got %d.", len(series.chunkDescs))
	}
}
Example #28
0
package extraction

import (
	"sort"
	"strings"
	"testing"

	"github.com/prometheus/client_golang/model"
)

var (
	ts = model.Now()
	in = `
# Only a quite simple scenario with two metric families.
# More complicated tests of the parser itself can be found in the text package.
# TYPE mf2 counter
mf2 3
mf1{label="value1"} -3.14 123456
mf1{label="value2"} 42
mf2 4
`
	out = map[model.LabelValue]*Result{
		"mf1": {
			Samples: model.Samples{
				&model.Sample{
					Metric:    model.Metric{model.MetricNameLabel: "mf1", "label": "value1"},
					Value:     -3.14,
Example #29
0
func TestRetentionCutoff(t *testing.T) {
	now := clientmodel.Now()
	insertStart := now.Add(-2 * time.Hour)

	s, closer := NewTestStorage(t, 1)
	defer closer.Close()

	// Stop maintenance loop to prevent actual purging.
	s.loopStopping <- struct{}{}

	s.dropAfter = 1 * time.Hour

	for i := 0; i < 120; i++ {
		smpl := &clientmodel.Sample{
			Metric:    clientmodel.Metric{"job": "test"},
			Timestamp: insertStart.Add(time.Duration(i) * time.Minute), // 1 minute intervals.
			Value:     1,
		}
		s.Append(smpl)
	}
	s.WaitForIndexing()

	var fp clientmodel.Fingerprint
	for f := range s.fingerprintsForLabelPairs(metric.LabelPair{Name: "job", Value: "test"}) {
		fp = f
		break
	}

	pl := s.NewPreloader()
	defer pl.Close()

	// Preload everything.
	err := pl.PreloadRange(fp, insertStart, now, 5*time.Minute)
	if err != nil {
		t.Fatalf("Error preloading outdated chunks: %s", err)
	}

	it := s.NewIterator(fp)

	vals := it.ValueAtTime(now.Add(-61 * time.Minute))
	if len(vals) != 0 {
		t.Errorf("unexpected result for timestamp before retention period")
	}

	vals = it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now})
	// We get 59 values here because the clientmodel.Now() is slightly later
	// than our now.
	if len(vals) != 59 {
		t.Errorf("expected 59 values but got %d", len(vals))
	}
	if expt := now.Add(-1 * time.Hour).Add(time.Minute); vals[0].Timestamp != expt {
		t.Errorf("unexpected timestamp for first sample: %v, expected %v", vals[0].Timestamp.Time(), expt.Time())
	}

	vals = it.BoundaryValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now})
	if len(vals) != 2 {
		t.Errorf("expected 2 values but got %d", len(vals))
	}
	if expt := now.Add(-1 * time.Hour).Add(time.Minute); vals[0].Timestamp != expt {
		t.Errorf("unexpected timestamp for first sample: %v, expected %v", vals[0].Timestamp.Time(), expt.Time())
	}
}
Example #30
0
func TestDropMetrics(t *testing.T) {
	now := clientmodel.Now()
	insertStart := now.Add(-2 * time.Hour)

	s, closer := NewTestStorage(t, 1)
	defer closer.Close()

	m1 := clientmodel.Metric{clientmodel.MetricNameLabel: "test", "n1": "v1"}
	m2 := clientmodel.Metric{clientmodel.MetricNameLabel: "test", "n1": "v2"}

	N := 120000

	for j, m := range []clientmodel.Metric{m1, m2} {
		for i := 0; i < N; i++ {
			smpl := &clientmodel.Sample{
				Metric:    m,
				Timestamp: insertStart.Add(time.Duration(i) * time.Millisecond), // 1 millisecond intervals.
				Value:     clientmodel.SampleValue(j),
			}
			s.Append(smpl)
		}
	}
	s.WaitForIndexing()

	fps := s.fingerprintsForLabelPairs(metric.LabelPair{Name: clientmodel.MetricNameLabel, Value: "test"})
	if len(fps) != 2 {
		t.Fatalf("unexpected number of fingerprints: %d", len(fps))
	}

	var fpList clientmodel.Fingerprints
	for fp := range fps {
		it := s.NewIterator(fp)
		if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != N {
			t.Fatalf("unexpected number of samples: %d", len(vals))
		}
		fpList = append(fpList, fp)
	}

	s.DropMetricsForFingerprints(fpList[0])
	s.WaitForIndexing()

	fps2 := s.fingerprintsForLabelPairs(metric.LabelPair{
		Name: clientmodel.MetricNameLabel, Value: "test",
	})
	if len(fps2) != 1 {
		t.Fatalf("unexpected number of fingerprints: %d", len(fps2))
	}

	it := s.NewIterator(fpList[0])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != 0 {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}
	it = s.NewIterator(fpList[1])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != N {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}

	s.DropMetricsForFingerprints(fpList...)
	s.WaitForIndexing()

	fps3 := s.fingerprintsForLabelPairs(metric.LabelPair{
		Name: clientmodel.MetricNameLabel, Value: "test",
	})
	if len(fps3) != 0 {
		t.Fatalf("unexpected number of fingerprints: %d", len(fps3))
	}

	it = s.NewIterator(fpList[0])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != 0 {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}
	it = s.NewIterator(fpList[1])
	if vals := it.RangeValues(metric.Interval{OldestInclusive: insertStart, NewestInclusive: now}); len(vals) != 0 {
		t.Fatalf("unexpected number of samples: %d", len(vals))
	}
}