func testLabelValues(t test.Tester) {
	var scenarios = []struct {
		in  LabelValues
		out LabelValues
	}{
		{
			in:  LabelValues{"ZZZ", "zzz"},
			out: LabelValues{"ZZZ", "zzz"},
		},
		{
			in:  LabelValues{"aaa", "AAA"},
			out: LabelValues{"AAA", "aaa"},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if expected != scenario.in[j] {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}
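// testLabelPairs verifies that sorting LabelPairs yields the expected order and
// that each sorted element compares Equal to the expected pair.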
func testLabelPairs(t test.Tester) {
	var scenarios = []struct {
		in  LabelPairs
		out LabelPairs
	}{
		{
			in: LabelPairs{
				{
					Name:  "AAA",
					Value: "aaa",
				},
			},
			out: LabelPairs{
				{
					Name:  "AAA",
					Value: "aaa",
				},
			},
		},
		{
			in: LabelPairs{
				{
					Name:  "aaa",
					Value: "aaa",
				},
				{
					Name:  "ZZZ",
					Value: "aaa",
				},
			},
			out: LabelPairs{
				{
					Name:  "ZZZ",
					Value: "aaa",
				},
				{
					Name:  "aaa",
					Value: "aaa",
				},
			},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if !expected.Equal(scenario.in[j]) {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}
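// buildLevelDBTestPersistencesMaker returns a factory that creates a
// LevelDB-backed MetricPersistence rooted in a fresh temporary directory,
// together with a test.Closer for cleanup. (The name argument is currently
// unused; the directory prefix is hard-coded.)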
func buildLevelDBTestPersistencesMaker(name string, t test.Tester) func() (MetricPersistence, test.Closer) {
	return func() (MetricPersistence, test.Closer) {
		temporaryDirectory := test.NewTemporaryDirectory("get_value_at_time", t)

		p, err := NewLevelDBMetricPersistence(temporaryDirectory.Path())
		if err != nil {
			t.Errorf("Could not start up LevelDB: %q\n", err)
		}

		return p, temporaryDirectory
	}
}
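// ReadEmptyTests asserts that a pristine persistence returns no fingerprints
// for arbitrary generated label pairs or label names.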
func ReadEmptyTests(p MetricPersistence, t test.Tester) {
	hasLabelPair := func(x int) (success bool) {
		name := model.LabelName(string(x))
		value := model.LabelValue(string(x))

		labelSet := model.LabelSet{
			name: value,
		}

		fingerprints, err := p.GetFingerprintsForLabelSet(labelSet)
		if err != nil {
			t.Error(err)
			return
		}

		success = len(fingerprints) == 0
		if !success {
			t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints))
		}

		return
	}

	err := quick.Check(hasLabelPair, nil)
	if err != nil {
		t.Error(err)
		return
	}

	hasLabelName := func(x int) (success bool) {
		labelName := model.LabelName(string(x))

		fingerprints, err := p.GetFingerprintsForLabelName(labelName)
		if err != nil {
			t.Error(err)
			return
		}

		success = len(fingerprints) == 0
		if !success {
			t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints))
		}

		return
	}

	err = quick.Check(hasLabelName, nil)
	if err != nil {
		t.Error(err)
		return
	}
}
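// AppendSampleAsSparseAppendWithReadsTests appends generated single-sample
// metrics one at a time and immediately reads them back through
// GetLabelValuesForLabelName and label-matcher fingerprint queries.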
func AppendSampleAsSparseAppendWithReadsTests(p metric.Persistence, t test.Tester) {
	appendSample := func(x int) (success bool) {
		v := clientmodel.SampleValue(x)
		ts := clientmodel.TimestampFromUnix(int64(x))
		labelName := clientmodel.LabelName(x)
		labelValue := clientmodel.LabelValue(x)
		l := clientmodel.Metric{labelName: labelValue}

		sample := &clientmodel.Sample{
			Value:     v,
			Timestamp: ts,
			Metric:    l,
		}

		err := p.AppendSamples(clientmodel.Samples{sample})
		if err != nil {
			t.Error(err)
			return
		}

		values, err := p.GetLabelValuesForLabelName(labelName)
		if err != nil {
			t.Error(err)
			return
		}
		if len(values) != 1 {
			t.Errorf("expected label values count of %d, got %d", 1, len(values))
			return
		}

		fingerprints, err := p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
			Type:  metric.Equal,
			Name:  labelName,
			Value: labelValue,
		}})
		if err != nil {
			t.Error(err)
			return
		}
		if len(fingerprints) != 1 {
			t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints))
			return
		}

		return true
	}

	if err := quick.Check(appendSample, nil); err != nil {
		t.Error(err)
	}
}
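// testLevelDBStochastic wires a LevelDB-backed persistence factory (rooted in a
// temporary directory) into the generic StochasticTests suite. A concrete entry
// point would typically wrap it along the lines of
//
//	func TestLevelDBStochastic(t *testing.T) { testLevelDBStochastic(t) }
//
// (hypothetical wrapper shown for illustration only).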
func testLevelDBStochastic(t test.Tester) {
	persistenceMaker := func() (MetricPersistence, test.Closer) {
		temporaryDirectory := test.NewTemporaryDirectory("test_leveldb_stochastic", t)

		p, err := NewLevelDBMetricPersistence(temporaryDirectory.Path())
		if err != nil {
			t.Errorf("Could not start up LevelDB: %q\n", err)
		}

		return p, temporaryDirectory
	}

	StochasticTests(persistenceMaker, t)
}
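// AppendSampleAsSparseAppendWithReadsTests is the MetricPersistence variant of
// the sparse append-with-reads test, reading appended samples back through
// GetFingerprintsForLabelName and GetFingerprintsForLabelSet.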
func AppendSampleAsSparseAppendWithReadsTests(p MetricPersistence, t test.Tester) {
	appendSample := func(x int) (success bool) {
		v := model.SampleValue(x)
		ts := time.Unix(int64(x), int64(x))
		labelName := model.LabelName(x)
		labelValue := model.LabelValue(x)
		l := model.Metric{labelName: labelValue}

		sample := model.Sample{
			Value:     v,
			Timestamp: ts,
			Metric:    l,
		}

		err := p.AppendSample(sample)
		if err != nil {
			t.Error(err)
			return
		}

		fingerprints, err := p.GetFingerprintsForLabelName(labelName)
		if err != nil {
			t.Error(err)
			return
		}
		if len(fingerprints) != 1 {
			t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints))
			return
		}

		fingerprints, err = p.GetFingerprintsForLabelSet(model.LabelSet{
			labelName: labelValue,
		})
		if err != nil {
			t.Error(err)
			return
		}
		if len(fingerprints) != 1 {
			t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints))
			return
		}

		return true
	}

	if err := quick.Check(appendSample, nil); err != nil {
		t.Error(err)
	}
}
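// ReadEmptyTests is the metric.Persistence variant of the empty-read test:
// label-matcher and label-value queries against a pristine store must return
// nothing.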
func ReadEmptyTests(p metric.Persistence, t test.Tester) {
	hasLabelPair := func(x int) (success bool) {
		fingerprints, err := p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
			Type:  metric.Equal,
			Name:  clientmodel.LabelName(string(x)),
			Value: clientmodel.LabelValue(string(x)),
		}})
		if err != nil {
			t.Error(err)
			return
		}

		success = len(fingerprints) == 0
		if !success {
			t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints))
		}

		return
	}

	err := quick.Check(hasLabelPair, nil)
	if err != nil {
		t.Error(err)
		return
	}

	hasLabelName := func(x int) (success bool) {
		labelName := clientmodel.LabelName(string(x))

		values, err := p.GetLabelValuesForLabelName(labelName)
		if err != nil {
			t.Error(err)
			return
		}

		success = len(values) == 0
		if !success {
			t.Errorf("expected label values count of %d, got %d", 0, len(values))
		}

		return
	}

	err = quick.Check(hasLabelName, nil)
	if err != nil {
		t.Error(err)
		return
	}
}
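// testMetric checks fingerprint construction from metrics against the expected
// row keys and hash values.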
func testMetric(t test.Tester) {
	var scenarios = []struct {
		input  map[string]string
		hash   uint64
		rowkey string
	}{
		{
			input:  map[string]string{},
			rowkey: "02676020557754725067--0-",
			hash:   2676020557754725067,
		},
		{
			input: map[string]string{
				"first_name":   "electro",
				"occupation":   "robot",
				"manufacturer": "westinghouse",
			},
			rowkey: "04776841610193542734-f-56-t",
			hash:   4776841610193542734,
		},
		{
			input: map[string]string{
				"x": "y",
			},
			rowkey: "01306929544689993150-x-2-y",
			hash:   1306929544689993150,
		},
	}

	for i, scenario := range scenarios {
		metric := Metric{}
		for key, value := range scenario.input {
			metric[LabelName(key)] = LabelValue(value)
		}

		expectedRowKey := scenario.rowkey
		expectedHash := scenario.hash
		fingerprint := NewFingerprintFromMetric(metric)
		actualRowKey := fingerprint.ToRowKey()
		actualHash := fingerprint.Hash()

		if expectedRowKey != actualRowKey {
			t.Errorf("%d. expected %s, got %s", i, expectedRowKey, actualRowKey)
		}
		if actualHash != expectedHash {
			t.Errorf("%d. expected %d, got %d", i, expectedHash, actualHash)
		}
	}
}
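// GetFingerprintsForLabelSetUsesAndForLabelMatchingTests stores several latency
// metrics and confirms that a label-set-based match on metric name plus
// percentile selects exactly one fingerprint.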
func GetFingerprintsForLabelSetUsesAndForLabelMatchingTests(p metric.Persistence, t test.Tester) {
	metrics := []clientmodel.LabelSet{
		{clientmodel.MetricNameLabel: "request_metrics_latency_equal_tallying_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "requests_metrics_latency_equal_accumulating_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "requests_metrics_latency_logarithmic_accumulating_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "requests_metrics_latency_logarithmic_tallying_microseconds", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
		{clientmodel.MetricNameLabel: "targets_healthy_scrape_latency_ms", "instance": "http://localhost:9090/metrics.json", "percentile": "0.010000"},
	}

	for _, metric := range metrics {
		m := clientmodel.Metric{}

		for k, v := range metric {
			m[clientmodel.LabelName(k)] = clientmodel.LabelValue(v)
		}

		testAppendSamples(p, &clientmodel.Sample{
			Value:     clientmodel.SampleValue(0.0),
			Timestamp: clientmodel.Now(),
			Metric:    m,
		}, t)
	}

	labelSet := clientmodel.LabelSet{
		clientmodel.MetricNameLabel: "targets_healthy_scrape_latency_ms",
		"percentile":                "0.010000",
	}

	fingerprints, err := p.GetFingerprintsForLabelMatchers(labelMatchersFromLabelSet(labelSet))
	if err != nil {
		t.Errorf("could not get labels: %s", err)
	}

	if len(fingerprints) != 1 {
		t.Errorf("expected a single matching fingerprint, got %s", fingerprints)
	}
}
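// GetFingerprintsForLabelSetTests appends two samples sharing a metric name and
// checks fingerprint counts when querying by metric name and by the
// request_type label.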
func GetFingerprintsForLabelSetTests(p MetricPersistence, t test.Tester) {
	testAppendSample(p, model.Sample{
		Value:     0,
		Timestamp: time.Time{},
		Metric: model.Metric{
			model.MetricNameLabel: "my_metric",
			"request_type":        "your_mom",
		},
	}, t)

	testAppendSample(p, model.Sample{
		Value:     0,
		Timestamp: time.Time{},
		Metric: model.Metric{
			model.MetricNameLabel: "my_metric",
			"request_type":        "your_dad",
		},
	}, t)

	result, err := p.GetFingerprintsForLabelSet(model.LabelSet{
		model.MetricNameLabel: model.LabelValue("my_metric"),
	})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 2 {
		t.Errorf("Expected two elements.")
	}

	result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
		model.LabelName("request_type"): model.LabelValue("your_mom"),
	})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}

	result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
		model.LabelName("request_type"): model.LabelValue("your_dad"),
	})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}
}
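// testDiscriminatorHttpHeader checks that ProcessorForRequestHeader selects the
// expected Processor (or error) for missing, unknown, and known
// X-Prometheus-API-Version headers.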
func testDiscriminatorHttpHeader(t test.Tester) {
	var scenarios = []struct {
		input  map[string]string
		output Processor
		err    error
	}{
		{
			output: nil,
			err:    fmt.Errorf("Received illegal and nil header."),
		},
		{
			input:  map[string]string{"X-Prometheus-API-Version": "0.0.0"},
			output: nil,
			err:    fmt.Errorf("Unrecognized API version 0.0.0"),
		},
		{
			input:  map[string]string{"X-Prometheus-API-Version": "0.0.1"},
			output: Processor001,
			err:    nil,
		},
	}

	for i, scenario := range scenarios {
		var header http.Header

		if len(scenario.input) > 0 {
			header = http.Header{}
		}

		for key, value := range scenario.input {
			header.Add(key, value)
		}

		actual, err := DefaultRegistry.ProcessorForRequestHeader(header)

		if scenario.err != err {
			if scenario.err != nil && err != nil {
				if scenario.err.Error() != err.Error() {
					t.Errorf("%d. expected %s, got %s", i, scenario.err, err)
				}
			} else if scenario.err != nil || err != nil {
				t.Errorf("%d. expected %s, got %s", i, scenario.err, err)
			}
		}

		if scenario.output != actual {
			t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
		}
	}
}
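// testTargetPool adds targets with literal schedules to a TargetPool and checks
// that, after sorting, the pool reports its targets in scheduled order while
// keeping the expected size and addresses.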
func testTargetPool(t test.Tester) {
	type expectation struct {
		size int
	}

	type input struct {
		address      string
		scheduledFor time.Time
	}

	type output struct {
		address string
	}

	var scenarios = []struct {
		name    string
		inputs  []input
		outputs []output
	}{
		{
			name:    "empty",
			inputs:  []input{},
			outputs: []output{},
		},
		{
			name: "single element",
			inputs: []input{
				{
					address: "http://single.com",
				},
			},
			outputs: []output{
				{
					address: "http://single.com",
				},
			},
		},
		{
			name: "plural descending schedules",
			inputs: []input{
				{
					address:      "http://plural-descending.com",
					scheduledFor: time.Date(2013, 1, 4, 12, 0, 0, 0, time.UTC),
				},
				{
					address:      "http://plural-descending.net",
					scheduledFor: time.Date(2013, 1, 4, 11, 0, 0, 0, time.UTC),
				},
			},
			outputs: []output{
				{
					address: "http://plural-descending.net",
				},
				{
					address: "http://plural-descending.com",
				},
			},
		},
		{
			name: "plural ascending schedules",
			inputs: []input{
				{
					address:      "http://plural-ascending.net",
					scheduledFor: time.Date(2013, 1, 4, 11, 0, 0, 0, time.UTC),
				},
				{
					address:      "http://plural-ascending.com",
					scheduledFor: time.Date(2013, 1, 4, 12, 0, 0, 0, time.UTC),
				},
			},
			outputs: []output{
				{
					address: "http://plural-ascending.net",
				},
				{
					address: "http://plural-ascending.com",
				},
			},
		},
	}

	for i, scenario := range scenarios {
		pool := TargetPool{}

		for _, input := range scenario.inputs {
			target := target{
				address:   input.address,
				scheduler: literalScheduler(input.scheduledFor),
			}
			pool.addTarget(&target)
		}

		sort.Sort(pool)

		if pool.Len() != len(scenario.outputs) {
			t.Errorf("%s %d. expected TargetPool size to be %d but was %d", scenario.name, i, len(scenario.outputs), pool.Len())
		} else {
			for j, output := range scenario.outputs {
				target := pool.targets[j]
				if target.Address() != output.address {
					t.Errorf("%s %d.%d. expected Target address to be %s but was %s", scenario.name, i, j, output.address, target.Address())
				}
			}

			if pool.Len() != len(scenario.outputs) {
				t.Errorf("%s %d. expected to be repopulated with %d elements, got %d", scenario.name, i, len(scenario.outputs), pool.Len())
			}
		}
	}
}
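// GetMetricForFingerprintTests resolves fingerprints back to their metrics and
// additionally verifies that mutating a returned metric does not leak into
// subsequent lookups.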
func GetMetricForFingerprintTests(p metric.Persistence, t test.Tester) {
	testAppendSamples(p, &clientmodel.Sample{
		Value:     0,
		Timestamp: 0,
		Metric: clientmodel.Metric{
			"request_type": "your_mom",
		},
	}, t)

	testAppendSamples(p, &clientmodel.Sample{
		Value:     0,
		Timestamp: 0,
		Metric: clientmodel.Metric{
			"request_type": "your_dad",
			"one-off":      "value",
		},
	}, t)

	result, err := p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
		Type:  metric.Equal,
		Name:  "request_type",
		Value: "your_mom",
	}})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}

	m, err := p.GetMetricForFingerprint(result[0])
	if err != nil {
		t.Error(err)
	}
	if m == nil {
		t.Fatal("Did not expect nil.")
	}
	if len(m) != 1 {
		t.Errorf("Expected one-dimensional metric.")
	}
	if m["request_type"] != "your_mom" {
		t.Errorf("Expected metric to match.")
	}

	result, err = p.GetFingerprintsForLabelMatchers(metric.LabelMatchers{{
		Type:  metric.Equal,
		Name:  "request_type",
		Value: "your_dad",
	}})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}

	m, err = p.GetMetricForFingerprint(result[0])
	if m == nil {
		t.Fatal("Did not expect nil.")
	}
	if err != nil {
		t.Error(err)
	}
	if len(m) != 2 {
		t.Errorf("Expected two-dimensional metric.")
	}
	if m["request_type"] != "your_dad" {
		t.Errorf("Expected metric to match.")
	}
	if m["one-off"] != "value" {
		t.Errorf("Expected metric to match.")
	}

	// Verify that mutating a returned metric does not cause the mutated
	// metric to be returned by the next GetMetricForFingerprint() call.
	m["one-off"] = "new value"
	m, err = p.GetMetricForFingerprint(result[0])
	if m == nil {
		t.Fatal("Did not expect nil.")
	}
	if err != nil {
		t.Error(err)
	}
	if len(m) != 2 {
		t.Errorf("Expected two-dimensional metric.")
	}
	if m["request_type"] != "your_dad" {
		t.Errorf("Expected metric to match.")
	}
	if m["one-off"] != "value" {
		t.Errorf("Expected metric to match.")
	}
}
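// testBuilder feeds at-time, at-interval, and range requests into a
// viewRequestBuilder and checks that the popped operations come back grouped
// and sorted by fingerprint.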
func testBuilder(t test.Tester) { type atTime struct { fingerprint string time clientmodel.Timestamp } type atInterval struct { fingerprint string from clientmodel.Timestamp through clientmodel.Timestamp interval time.Duration } type atRange struct { fingerprint string from clientmodel.Timestamp through clientmodel.Timestamp } type in struct { atTimes []atTime atIntervals []atInterval atRanges []atRange } type out []struct { fingerprint string operations ops } var scenarios = []struct { in in out out }{ // Ensure that the fingerprint is sorted in proper order. { in: in{ atTimes: []atTime{ { fingerprint: "0000000000000001111-a-4-a", time: clientmodel.TimestampFromUnix(100), }, { fingerprint: "0000000000000000000-a-4-a", time: clientmodel.TimestampFromUnix(100), }, }, }, out: out{ { fingerprint: "00000000000000000000-a-4-a", }, { fingerprint: "00000000000000001111-a-4-a", }, }, }, // // Ensure that the fingerprint-timestamp pairs are sorted in proper order. { in: in{ atTimes: []atTime{ { fingerprint: "1111-a-4-a", time: clientmodel.TimestampFromUnix(100), }, { fingerprint: "1111-a-4-a", time: clientmodel.TimestampFromUnix(200), }, { fingerprint: "0-a-4-a", time: clientmodel.TimestampFromUnix(100), }, { fingerprint: "0-a-4-a", time: clientmodel.TimestampFromUnix(0), }, }, }, out: out{ { fingerprint: "00000000000000000000-a-4-a", }, { fingerprint: "00000000000000000000-a-4-a", }, { fingerprint: "00000000000000001111-a-4-a", }, { fingerprint: "00000000000000001111-a-4-a", }, }, }, // Ensure grouping of operations { in: in{ atTimes: []atTime{ { fingerprint: "1111-a-4-a", time: clientmodel.TimestampFromUnix(100), }, }, atRanges: []atRange{ { fingerprint: "1111-a-4-a", from: clientmodel.TimestampFromUnix(100), through: clientmodel.TimestampFromUnix(1000), }, { fingerprint: "1111-a-4-a", from: clientmodel.TimestampFromUnix(100), through: clientmodel.TimestampFromUnix(9000), }, }, }, out: out{ { fingerprint: "00000000000000001111-a-4-a", }, { fingerprint: "00000000000000001111-a-4-a", }, { fingerprint: "00000000000000001111-a-4-a", }, }, }, } for i, scenario := range scenarios { builder := &viewRequestBuilder{} for _, atTime := range scenario.in.atTimes { fingerprint := &clientmodel.Fingerprint{} fingerprint.LoadFromString(atTime.fingerprint) builder.GetMetricAtTime(fingerprint, atTime.time) } for _, atInterval := range scenario.in.atIntervals { fingerprint := &clientmodel.Fingerprint{} fingerprint.LoadFromString(atInterval.fingerprint) builder.GetMetricAtInterval(fingerprint, atInterval.from, atInterval.through, atInterval.interval) } for _, atRange := range scenario.in.atRanges { fingerprint := &clientmodel.Fingerprint{} fingerprint.LoadFromString(atRange.fingerprint) builder.GetMetricRange(fingerprint, atRange.from, atRange.through) } for j, job := range scenario.out { got := builder.PopOp() if got.Fingerprint().String() != job.fingerprint { t.Errorf("%d.%d. expected fingerprint %s, got %s", i, j, job.fingerprint, got.Fingerprint()) } } if builder.HasOp() { t.Error("Expected builder to have no scan jobs left.") } } }
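// StochasticTests drives a randomized, property-style workload against a
// freshly built persistence: it appends random metrics with shared and
// unshared labels and cross-checks fingerprint lookups and range value
// queries.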
func StochasticTests(persistenceMaker func() (metric.Persistence, test.Closer), t test.Tester) { stochastic := func(x int) (success bool) { p, closer := persistenceMaker() defer closer.Close() defer p.Close() seed := rand.NewSource(int64(x)) random := rand.New(seed) numberOfMetrics := random.Intn(stochasticMaximumVariance) + 1 numberOfSharedLabels := random.Intn(stochasticMaximumVariance) numberOfUnsharedLabels := random.Intn(stochasticMaximumVariance) numberOfSamples := random.Intn(stochasticMaximumVariance) + 2 numberOfRangeScans := random.Intn(stochasticMaximumVariance) metricTimestamps := map[int]map[int64]bool{} metricEarliestSample := map[int]int64{} metricNewestSample := map[int]int64{} for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ { sample := &clientmodel.Sample{ Metric: clientmodel.Metric{}, } v := clientmodel.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex)) sample.Metric[clientmodel.MetricNameLabel] = v for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ { l := clientmodel.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)) v := clientmodel.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex)) sample.Metric[l] = v } for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ { l := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex)) v := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex)) sample.Metric[l] = v } timestamps := map[int64]bool{} metricTimestamps[metricIndex] = timestamps var newestSample int64 = math.MinInt64 var oldestSample int64 = math.MaxInt64 var nextTimestamp func() int64 nextTimestamp = func() int64 { var candidate int64 candidate = random.Int63n(math.MaxInt32 - 1) if _, has := timestamps[candidate]; has { // WART candidate = nextTimestamp() } timestamps[candidate] = true if candidate < oldestSample { oldestSample = candidate } if candidate > newestSample { newestSample = candidate } return candidate } // BUG(matt): Invariant of the in-memory database assumes this. 
sortedTimestamps := timeslice{} for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ { sortedTimestamps = append(sortedTimestamps, clientmodel.TimestampFromUnix(nextTimestamp())) } sort.Sort(sortedTimestamps) for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ { sample.Timestamp = sortedTimestamps[sampleIndex] sample.Value = clientmodel.SampleValue(sampleIndex) err := p.AppendSamples(clientmodel.Samples{sample}) if err != nil { t.Error(err) return } } metricEarliestSample[metricIndex] = oldestSample metricNewestSample[metricIndex] = newestSample for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ { matchers := metric.LabelMatchers{{ Type: metric.Equal, Name: clientmodel.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)), Value: clientmodel.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex)), }} fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers) if err != nil { t.Error(err) return } if len(fingerprints) == 0 { t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints)) return } } } for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ { for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ { labelName := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex)) labelValue := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex)) matchers := metric.LabelMatchers{{ Type: metric.Equal, Name: labelName, Value: labelValue, }} fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers) if err != nil { t.Error(err) return } if len(fingerprints) != 1 { t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints)) return } } m := clientmodel.Metric{} m[clientmodel.MetricNameLabel] = clientmodel.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex)) for i := 0; i < numberOfSharedLabels; i++ { l := clientmodel.LabelName(fmt.Sprintf("shared_label_%d", i)) v := clientmodel.LabelValue(fmt.Sprintf("label_%d", i)) m[l] = v } for i := 0; i < numberOfUnsharedLabels; i++ { l := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, i)) v := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", i)) m[l] = v } for i := 0; i < numberOfRangeScans; i++ { timestamps := metricTimestamps[metricIndex] var first int64 var second int64 for { firstCandidate := random.Int63n(int64(len(timestamps))) secondCandidate := random.Int63n(int64(len(timestamps))) smallest := int64(-1) largest := int64(-1) if firstCandidate == secondCandidate { continue } else if firstCandidate > secondCandidate { largest = firstCandidate smallest = secondCandidate } else { largest = secondCandidate smallest = firstCandidate } j := int64(0) for i := range timestamps { if j == smallest { first = i } else if j == largest { second = i break } j++ } break } begin := first end := second if second < first { begin, end = second, first } interval := metric.Interval{ OldestInclusive: clientmodel.TimestampFromUnix(begin), NewestInclusive: clientmodel.TimestampFromUnix(end), } samples := metric.Values{} fp := &clientmodel.Fingerprint{} fp.LoadFromMetric(m) switch persistence := p.(type) { case metric.View: samples = persistence.GetRangeValues(fp, interval) if len(samples) < 2 { t.Fatalf("expected sample count greater than %d, got %d", 2, len(samples)) } case *LevelDBPersistence: var err error samples, err = levelDBGetRangeValues(persistence, fp, interval) if err != nil { t.Fatal(err) } if 
len(samples) < 2 { t.Fatalf("expected sample count greater than %d, got %d", 2, len(samples)) } default: t.Error("Unexpected type of metric.Persistence.") } } } return true } if err := quick.Check(stochastic, nil); err != nil { t.Error(err) } }
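// StochasticTests is the MetricPersistence variant of the stochastic suite; it
// additionally checks GetFingerprintsForLabelName counts and reads sample
// ranges back with GetRangeValues.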
func StochasticTests(persistenceMaker func() (MetricPersistence, test.Closer), t test.Tester) { stochastic := func(x int) (success bool) { p, closer := persistenceMaker() defer closer.Close() defer p.Close() seed := rand.NewSource(int64(x)) random := rand.New(seed) numberOfMetrics := random.Intn(stochasticMaximumVariance) + 1 numberOfSharedLabels := random.Intn(stochasticMaximumVariance) numberOfUnsharedLabels := random.Intn(stochasticMaximumVariance) numberOfSamples := random.Intn(stochasticMaximumVariance) + 2 numberOfRangeScans := random.Intn(stochasticMaximumVariance) metricTimestamps := map[int]map[int64]bool{} metricEarliestSample := map[int]int64{} metricNewestSample := map[int]int64{} for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ { sample := model.Sample{ Metric: model.Metric{}, } v := model.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex)) sample.Metric[model.MetricNameLabel] = v for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ { l := model.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)) v := model.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex)) sample.Metric[l] = v } for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ { l := model.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex)) v := model.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex)) sample.Metric[l] = v } timestamps := map[int64]bool{} metricTimestamps[metricIndex] = timestamps var ( newestSample int64 = math.MinInt64 oldestSample int64 = math.MaxInt64 nextTimestamp func() int64 ) nextTimestamp = func() int64 { var candidate int64 candidate = random.Int63n(math.MaxInt32 - 1) if _, has := timestamps[candidate]; has { // WART candidate = nextTimestamp() } timestamps[candidate] = true if candidate < oldestSample { oldestSample = candidate } if candidate > newestSample { newestSample = candidate } return candidate } for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ { sample.Timestamp = time.Unix(nextTimestamp(), 0) sample.Value = model.SampleValue(sampleIndex) err := p.AppendSample(sample) if err != nil { t.Error(err) return } } metricEarliestSample[metricIndex] = oldestSample metricNewestSample[metricIndex] = newestSample for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ { labelPair := model.LabelSet{ model.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)): model.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex)), } fingerprints, err := p.GetFingerprintsForLabelSet(labelPair) if err != nil { t.Error(err) return } if len(fingerprints) == 0 { t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints)) return } labelName := model.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)) fingerprints, err = p.GetFingerprintsForLabelName(labelName) if err != nil { t.Error(err) return } if len(fingerprints) == 0 { t.Errorf("expected fingerprint count of %d, got %d", 0, len(fingerprints)) return } } } for sharedIndex := 0; sharedIndex < numberOfSharedLabels; sharedIndex++ { labelName := model.LabelName(fmt.Sprintf("shared_label_%d", sharedIndex)) fingerprints, err := p.GetFingerprintsForLabelName(labelName) if err != nil { t.Error(err) return } if len(fingerprints) != numberOfMetrics { t.Errorf("expected fingerprint count of %d, got %d", numberOfMetrics, len(fingerprints)) return } } for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ { for 
unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ { labelName := model.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex)) labelValue := model.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex)) labelSet := model.LabelSet{ labelName: labelValue, } fingerprints, err := p.GetFingerprintsForLabelSet(labelSet) if err != nil { t.Error(err) return } if len(fingerprints) != 1 { t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints)) return } fingerprints, err = p.GetFingerprintsForLabelName(labelName) if err != nil { t.Error(err) return } if len(fingerprints) != 1 { t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints)) return } } metric := model.Metric{} metric[model.MetricNameLabel] = model.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex)) for i := 0; i < numberOfSharedLabels; i++ { l := model.LabelName(fmt.Sprintf("shared_label_%d", i)) v := model.LabelValue(fmt.Sprintf("label_%d", i)) metric[l] = v } for i := 0; i < numberOfUnsharedLabels; i++ { l := model.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, i)) v := model.LabelValue(fmt.Sprintf("private_label_%d", i)) metric[l] = v } for i := 0; i < numberOfRangeScans; i++ { timestamps := metricTimestamps[metricIndex] var first int64 = 0 var second int64 = 0 for { firstCandidate := random.Int63n(int64(len(timestamps))) secondCandidate := random.Int63n(int64(len(timestamps))) smallest := int64(-1) largest := int64(-1) if firstCandidate == secondCandidate { continue } else if firstCandidate > secondCandidate { largest = firstCandidate smallest = secondCandidate } else { largest = secondCandidate smallest = firstCandidate } j := int64(0) for i := range timestamps { if j == smallest { first = i } else if j == largest { second = i break } j++ } break } begin := first end := second if second < first { begin, end = second, first } interval := model.Interval{ OldestInclusive: time.Unix(begin, 0), NewestInclusive: time.Unix(end, 0), } samples, err := p.GetRangeValues(model.NewFingerprintFromMetric(metric), interval) if err != nil { t.Error(err) return } if len(samples.Values) < 2 { t.Errorf("expected sample count less than %d, got %d", 2, len(samples.Values)) return } } } return true } if err := quick.Check(stochastic, nil); err != nil { t.Error(err) } }
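// GetFingerprintsForLabelNameTests verifies fingerprint counts per label name:
// names shared by both appended samples match two fingerprints, while
// sample-specific names match one.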
func GetFingerprintsForLabelNameTests(p MetricPersistence, t test.Tester) {
	testAppendSample(p, model.Sample{
		Value:     0,
		Timestamp: time.Time{},
		Metric: model.Metric{
			model.MetricNameLabel: "my_metric",
			"request_type":        "your_mom",
			"language":            "english",
		},
	}, t)

	testAppendSample(p, model.Sample{
		Value:     0,
		Timestamp: time.Time{},
		Metric: model.Metric{
			model.MetricNameLabel: "my_metric",
			"request_type":        "your_dad",
			"sprache":             "deutsch",
		},
	}, t)

	b := model.MetricNameLabel
	result, err := p.GetFingerprintsForLabelName(b)
	if err != nil {
		t.Error(err)
	}
	if len(result) != 2 {
		t.Errorf("Expected two elements.")
	}

	b = model.LabelName("request_type")
	result, err = p.GetFingerprintsForLabelName(b)
	if err != nil {
		t.Error(err)
	}
	if len(result) != 2 {
		t.Errorf("Expected two elements.")
	}

	b = model.LabelName("language")
	result, err = p.GetFingerprintsForLabelName(b)
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}

	b = model.LabelName("sprache")
	result, err = p.GetFingerprintsForLabelName(b)
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}
}
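// testProcessor001Process runs Processor001 over a raw telemetry JSON payload
// and compares the emitted Results against the expected samples without regard
// to delivery order.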
func testProcessor001Process(t test.Tester) { var scenarios = []struct { in string out []Result err error }{ { err: fmt.Errorf("unexpected end of JSON input"), }, { in: "[{\"baseLabels\":{\"name\":\"rpc_calls_total\"},\"docstring\":\"RPC calls.\",\"metric\":{\"type\":\"counter\",\"value\":[{\"labels\":{\"service\":\"zed\"},\"value\":25},{\"labels\":{\"service\":\"bar\"},\"value\":25},{\"labels\":{\"service\":\"foo\"},\"value\":25}]}},{\"baseLabels\":{\"name\":\"rpc_latency_microseconds\"},\"docstring\":\"RPC latency.\",\"metric\":{\"type\":\"histogram\",\"value\":[{\"labels\":{\"service\":\"foo\"},\"value\":{\"0.010000\":15.890724674774395,\"0.050000\":15.890724674774395,\"0.500000\":84.63044031436561,\"0.900000\":160.21100853053224,\"0.990000\":172.49828748957728}},{\"labels\":{\"service\":\"zed\"},\"value\":{\"0.010000\":0.0459814091918713,\"0.050000\":0.0459814091918713,\"0.500000\":0.6120456642749681,\"0.900000\":1.355915069887731,\"0.990000\":1.772733213161236}},{\"labels\":{\"service\":\"bar\"},\"value\":{\"0.010000\":78.48563317257356,\"0.050000\":78.48563317257356,\"0.500000\":97.31798360385088,\"0.900000\":109.89202084295582,\"0.990000\":109.99626121011262}}]}}]", out: []Result{ { Sample: model.Sample{ Metric: model.Metric{"service": "zed", model.MetricNameLabel: "rpc_calls_total"}, Value: 25, }, }, { Sample: model.Sample{ Metric: model.Metric{"service": "bar", model.MetricNameLabel: "rpc_calls_total"}, Value: 25, }, }, { Sample: model.Sample{ Metric: model.Metric{"service": "foo", model.MetricNameLabel: "rpc_calls_total"}, Value: 25, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, Value: 0.04598141, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, Value: 78.485634, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.010000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, Value: 15.890724674774395, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, Value: 0.04598141, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, Value: 78.485634, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.050000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, Value: 15.890724674774395, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, Value: 0.61204565, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, Value: 97.317986, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.500000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, Value: 84.63044, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, Value: 1.3559151, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, Value: 109.89202, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.900000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, 
Value: 160.21101, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "zed"}, Value: 1.7727332, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "bar"}, Value: 109.99626, }, }, { Sample: model.Sample{ Metric: model.Metric{"percentile": "0.990000", model.MetricNameLabel: "rpc_latency_microseconds", "service": "foo"}, Value: 172.49829, }, }, }, }, } for i, scenario := range scenarios { inputChannel := make(chan Result, 1024) defer func(c chan Result) { close(c) }(inputChannel) reader := strings.NewReader(scenario.in) err := Processor001.Process(ioutil.NopCloser(reader), model.LabelSet{}, inputChannel) if !test.ErrorEqual(scenario.err, err) { t.Errorf("%d. expected err of %s, got %s", i, scenario.err, err) continue } if scenario.err != nil && err != nil { if scenario.err.Error() != err.Error() { t.Errorf("%d. expected err of %s, got %s", i, scenario.err, err) } } else if scenario.err != err { t.Errorf("%d. expected err of %s, got %s", i, scenario.err, err) } delivered := make([]Result, 0) for len(inputChannel) != 0 { delivered = append(delivered, <-inputChannel) } if len(delivered) != len(scenario.out) { t.Errorf("%d. expected output length of %d, got %d", i, len(scenario.out), len(delivered)) continue } expectedElements := list.New() for _, j := range scenario.out { expectedElements.PushBack(j) } for j := 0; j < len(delivered); j++ { actual := delivered[j] found := false for element := expectedElements.Front(); element != nil && found == false; element = element.Next() { candidate := element.Value.(Result) if !test.ErrorEqual(candidate.Err, actual.Err) { continue } if candidate.Sample.Value != actual.Sample.Value { continue } if len(candidate.Sample.Metric) != len(actual.Sample.Metric) { continue } labelsMatch := false for key, value := range candidate.Sample.Metric { actualValue, ok := actual.Sample.Metric[key] if !ok { break } if actualValue == value { labelsMatch = true break } } if !labelsMatch { continue } // XXX: Test time. found = true expectedElements.Remove(element) } if !found { t.Errorf("%d.%d. expected to find %s among candidate, absent", i, j, actual.Sample) } } } }
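// testHealthScheduler replays queued target health states through
// healthScheduler.Reschedule and checks the resulting schedule; the scenarios
// show an exponential back-off for UNREACHABLE targets, capped at 30 minutes.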
func testHealthScheduler(t test.Tester) { now := time.Now() var scenarios = []struct { futureHealthState []TargetState preloadedTimes []time.Time expectedSchedule []time.Time }{ // The behavior discussed in healthScheduler.Reschedule should be read // fully to understand the whys and wherefores. { futureHealthState: []TargetState{UNKNOWN, ALIVE, ALIVE}, preloadedTimes: []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)}, expectedSchedule: []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)}, }, { futureHealthState: []TargetState{UNKNOWN, UNREACHABLE, UNREACHABLE}, preloadedTimes: []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)}, expectedSchedule: []time.Time{now, now.Add(time.Second * 2), now.Add(time.Minute).Add(time.Second * 4)}, }, { futureHealthState: []TargetState{UNKNOWN, UNREACHABLE, ALIVE}, preloadedTimes: []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)}, expectedSchedule: []time.Time{now, now.Add(time.Second * 2), now.Add(time.Minute * 2)}, }, { futureHealthState: []TargetState{UNKNOWN, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE}, preloadedTimes: []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2), now.Add(time.Minute * 3), now.Add(time.Minute * 4), now.Add(time.Minute * 5), now.Add(time.Minute * 6), now.Add(time.Minute * 7), now.Add(time.Minute * 8), now.Add(time.Minute * 9), now.Add(time.Minute * 10), now.Add(time.Minute * 11), now.Add(time.Minute * 12)}, expectedSchedule: []time.Time{now, now.Add(time.Second * 2), now.Add(time.Minute * 1).Add(time.Second * 4), now.Add(time.Minute * 2).Add(time.Second * 8), now.Add(time.Minute * 3).Add(time.Second * 16), now.Add(time.Minute * 4).Add(time.Second * 32), now.Add(time.Minute * 5).Add(time.Second * 64), now.Add(time.Minute * 6).Add(time.Second * 128), now.Add(time.Minute * 7).Add(time.Second * 256), now.Add(time.Minute * 8).Add(time.Second * 512), now.Add(time.Minute * 9).Add(time.Second * 1024), now.Add(time.Minute * 10).Add(time.Minute * 30), now.Add(time.Minute * 11).Add(time.Minute * 30)}, }, } for i, scenario := range scenarios { provider := test.NewInstantProvider(scenario.preloadedTimes) reporter := fakeHealthReporter{} for _, state := range scenario.futureHealthState { reporter.stateQueue = append(reporter.stateQueue, state) } if len(scenario.preloadedTimes) != len(scenario.futureHealthState) || len(scenario.futureHealthState) != len(scenario.expectedSchedule) { t.Fatalf("%d. times and health reports and next time lengths were not equal.", i) } time := utility.Time{ Provider: provider, } scheduler := healthScheduler{ time: time, target: reporter, scheduledFor: now, } for j := 0; j < len(scenario.preloadedTimes); j++ { futureState := scenario.futureHealthState[j] scheduler.Reschedule(scenario.preloadedTimes[j], futureState) nextSchedule := scheduler.ScheduledFor() if nextSchedule != scenario.expectedSchedule[j] { t.Errorf("%d.%d. Expected to be scheduled to %s, got %s", i, j, scenario.expectedSchedule[j], nextSchedule) } } } }
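// BasicLifecycleTests merely guards against being handed a nil persistence.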
func BasicLifecycleTests(p MetricPersistence, t test.Tester) {
	if p == nil {
		t.Errorf("Received nil Metric Persistence.\n")
		return
	}
}
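// testMakeView appends table-driven sample data to a test tiered storage
// (optionally flushing to disk), issues at-time, at-interval, and along-range
// view requests, and compares the returned values and timestamps.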
func testMakeView(t test.Tester, flushToDisk bool) { type in struct { atTime []getValuesAtTimeOp atInterval []getValuesAtIntervalOp alongRange []getValuesAlongRangeOp } type out struct { atTime []metric.Values atInterval []metric.Values alongRange []metric.Values } m := clientmodel.Metric{clientmodel.MetricNameLabel: "request_count"} fingerprint := &clientmodel.Fingerprint{} fingerprint.LoadFromMetric(m) var ( instant = clientmodel.TimestampFromTime(time.Date(1984, 3, 30, 0, 0, 0, 0, time.Local)) scenarios = []struct { data clientmodel.Samples in in out out diskOnly bool }{ // No sample, but query asks for one. { in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{{}}, }, }, // Single sample, query asks for exact sample time. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Single sample, query time before the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant.Add(time.Second), }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second * 2), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second), Value: 0, }, }, }, }, }, // Single sample, query time after the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Two samples, query asks for first sample time. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Three samples, query asks for second sample time. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second), }, { Metric: m, Value: 2, Timestamp: instant.Add(time.Second * 2), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second), Value: 1, }, }, }, }, }, // Three samples, query asks for time between first and second samples. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second * 2), }, { Metric: m, Value: 2, Timestamp: instant.Add(time.Second * 4), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, { Timestamp: instant.Add(time.Second * 2), Value: 1, }, }, }, }, }, // Three samples, query asks for time between second and third samples. 
{ data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second * 2), }, { Metric: m, Value: 2, Timestamp: instant.Add(time.Second * 4), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second * 3)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second * 2), Value: 1, }, { Timestamp: instant.Add(time.Second * 4), Value: 2, }, }, }, }, }, // Two chunks of samples, query asks for values from second chunk. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*4)*time.Second), 2*time.Second, m, ), in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2) + clientmodel.MinimumTick)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2)), Value: 200, }, { Timestamp: instant.Add(time.Second * (time.Duration(*leveldbChunkSize*2) + 2)), Value: 201, }, }, }, }, }, // Two chunks of samples, query asks for values between both chunks. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*4)*time.Second), 2*time.Second, m, ), in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2) - clientmodel.MinimumTick)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second * (time.Duration(*leveldbChunkSize*2) - 2)), Value: 199, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2)), Value: 200, }, }, }, }, }, // Two chunks of samples, getValuesAtIntervalOp spanning both. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*6)*time.Second), 2*time.Second, m, ), in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-4) - clientmodel.MinimumTick)}, through: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2+4) + clientmodel.MinimumTick), }, interval: time.Second * 6, }, }, }, out: out{ atInterval: []metric.Values{ { { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2-6)), Value: 197, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2-4)), Value: 198, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2)), Value: 200, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2+2)), Value: 201, }, }, }, }, }, // Three chunks of samples, getValuesAlongRangeOp spanning all of them. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*6)*time.Second), 2*time.Second, m, ), in: in{ alongRange: []getValuesAlongRangeOp{ { baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-4) - clientmodel.MinimumTick)}, through: instant.Add(time.Second*time.Duration(*leveldbChunkSize*4+2) + clientmodel.MinimumTick), }, }, }, out: out{ alongRange: []metric.Values{buildValues( clientmodel.SampleValue(198), instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-4)), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4+2)+clientmodel.MinimumTick), 2*time.Second, )}, }, }, // Three chunks of samples and a getValuesAlongIntervalOp with an // interval larger than the natural sample interval, spanning the gap // between the second and third chunks. 
To test two consecutive // ExtractSamples() calls for the same op, we need three on-disk chunks, // because the first two chunks are loaded from disk together and passed // as one unit into ExtractSamples(). Especially, we want to test that // the first sample of the last chunk is included in the result. // // This is a regression test for an interval operator advancing too far // past the end of the currently available chunk, effectively skipping // over a value which is only available in the next chunk passed to // ExtractSamples(). // // Chunk and operator layout, assuming 200 samples per chunk: // // Chunk 1 Chunk 2 Chunk 3 // Values: 0......199 200......399 400......599 // Times: 0......398 400......798 800......1198 // | | // |_________ Operator _______| // 395 399 ...... 795 799 803 { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*6)*time.Second), 2*time.Second, m, ), in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2-5))}, through: instant.Add(time.Second * time.Duration(*leveldbChunkSize*4+3)), }, interval: time.Second * 4, }, }, }, out: out{ atInterval: []metric.Values{ // We need two overlapping buildValues() calls here since the last // value of the second chunk is extracted twice (value 399, time // offset 798s). append( // Values 197...399. // Times 394...798. buildValues( clientmodel.SampleValue(197), instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-6)), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4)), 2*time.Second, ), // Values 399...402. // Times 798...804. buildValues( clientmodel.SampleValue(399), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4-2)), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4+6)), 2*time.Second, )..., ), }, }, // This example only works with on-disk chunks due to the repeatedly // extracted value at the end of the second chunk. diskOnly: true, }, // Single sample, getValuesAtIntervalOp starting after the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, }, in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant.Add(time.Second)}, through: instant.Add(time.Second * 2), }, interval: time.Second, }, }, }, out: out{ atInterval: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Single sample, getValuesAtIntervalOp starting before the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant.Add(time.Second), }, }, in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant}, through: instant.Add(time.Second * 2), }, interval: time.Second, }, }, }, out: out{ atInterval: []metric.Values{ { { Timestamp: instant.Add(time.Second), Value: 0, }, { Timestamp: instant.Add(time.Second), Value: 0, }, }, }, }, }, } ) for i, scenario := range scenarios { if scenario.diskOnly && !flushToDisk { continue } tiered, closer := NewTestTieredStorage(t) err := tiered.AppendSamples(scenario.data) if err != nil { t.Fatalf("%d. 
failed to add fixture data: %s", i, err) } if flushToDisk { tiered.Flush() } requestBuilder := tiered.NewViewRequestBuilder() for _, atTime := range scenario.in.atTime { requestBuilder.GetMetricAtTime(fingerprint, atTime.current) } for _, atInterval := range scenario.in.atInterval { requestBuilder.GetMetricAtInterval(fingerprint, atInterval.current, atInterval.through, atInterval.interval) } for _, alongRange := range scenario.in.alongRange { requestBuilder.GetMetricRange(fingerprint, alongRange.current, alongRange.through) } v, err := requestBuilder.Execute(time.Second*5, stats.NewTimerGroup()) if err != nil { t.Fatalf("%d. failed due to %s", i, err) } // To get all values in the View, ask for the 'forever' interval. interval := metric.Interval{OldestInclusive: math.MinInt64, NewestInclusive: math.MaxInt64} for j, atTime := range scenario.out.atTime { actual := v.GetRangeValues(fingerprint, interval) if len(actual) != len(atTime) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(atTime), len(actual)) } for k, value := range atTime { if value.Value != actual[k].Value { t.Errorf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Errorf("%d.%d.%d expected %s (offset %ss) timestamp, got %s (offset %ss)", i, j, k, value.Timestamp, value.Timestamp.Sub(instant), actual[k].Timestamp, actual[k].Timestamp.Sub(instant)) } } } for j, atInterval := range scenario.out.atInterval { actual := v.GetRangeValues(fingerprint, interval) if len(actual) != len(atInterval) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(atInterval), len(actual)) } for k, value := range atInterval { if value.Value != actual[k].Value { t.Errorf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Errorf("%d.%d.%d expected %s (offset %ds) timestamp, got %s (offset %ds, value %s)", i, j, k, value.Timestamp, int(value.Timestamp.Sub(instant)/time.Second), actual[k].Timestamp, int(actual[k].Timestamp.Sub(instant)/time.Second), actual[k].Value) } } } for j, alongRange := range scenario.out.alongRange { actual := v.GetRangeValues(fingerprint, interval) if len(actual) != len(alongRange) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(alongRange), len(actual)) } for k, value := range alongRange { if value.Value != actual[k].Value { t.Fatalf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Fatalf("%d.%d.%d expected %s (offset %ss) timestamp, got %s (offset %ss)", i, j, k, value.Timestamp, value.Timestamp.Sub(instant), actual[k].Timestamp, actual[k].Timestamp.Sub(instant)) } } } closer.Close() } }
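// GetMetricForFingerprintTests is the MetricPersistence variant: it looks up
// fingerprints by label set and checks the labels of the dereferenced metrics.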
func GetMetricForFingerprintTests(p MetricPersistence, t test.Tester) {
	testAppendSample(p, model.Sample{
		Value:     0,
		Timestamp: time.Time{},
		Metric: model.Metric{
			"request_type": "your_mom",
		},
	}, t)

	testAppendSample(p, model.Sample{
		Value:     0,
		Timestamp: time.Time{},
		Metric: model.Metric{
			"request_type": "your_dad",
			"one-off":      "value",
		},
	}, t)

	result, err := p.GetFingerprintsForLabelSet(model.LabelSet{
		model.LabelName("request_type"): model.LabelValue("your_mom"),
	})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}

	v, e := p.GetMetricForFingerprint(result[0])
	if e != nil {
		t.Error(e)
	}
	if v == nil {
		t.Fatal("Did not expect nil.")
	}
	metric := *v
	if len(metric) != 1 {
		t.Errorf("Expected one-dimensional metric.")
	}
	if metric["request_type"] != "your_mom" {
		t.Errorf("Expected metric to match.")
	}

	result, err = p.GetFingerprintsForLabelSet(model.LabelSet{
		model.LabelName("request_type"): model.LabelValue("your_dad"),
	})
	if err != nil {
		t.Error(err)
	}
	if len(result) != 1 {
		t.Errorf("Expected one element.")
	}

	v, e = p.GetMetricForFingerprint(result[0])
	if v == nil {
		t.Fatal("Did not expect nil.")
	}
	metric = *v
	if e != nil {
		t.Error(e)
	}
	if len(metric) != 2 {
		t.Errorf("Expected two-dimensional metric.")
	}
	if metric["request_type"] != "your_dad" {
		t.Errorf("Expected metric to match.")
	}
	if metric["one-off"] != "value" {
		t.Errorf("Expected metric to match.")
	}
}