// This is copied from storage/metric/helpers_test.go, which is unfortunate but
// presently required to make things work.
func NewTestTieredStorage(t test.Tester) (storage *tiered.TieredStorage, closer test.Closer) {
	var directory test.TemporaryDirectory
	directory = test.NewTemporaryDirectory("test_tiered_storage", t)

	storage, err := tiered.NewTieredStorage(2500, 1000, 5*time.Second, 0*time.Second, directory.Path())
	if err != nil {
		if storage != nil {
			storage.Close()
		}
		directory.Close()
		t.Fatalf("Error creating storage: %s", err)
	}
	if storage == nil {
		directory.Close()
		t.Fatalf("storage == nil")
	}

	started := make(chan bool)
	go storage.Serve(started)
	<-started

	closer = &testTieredStorageCloser{
		storage:   storage,
		directory: directory,
	}
	return
}
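// The testTieredStorageCloser type referenced above is not shown in this
// section. A minimal sketch, assuming its only job is to shut the storage
// down and then remove the temporary directory (field names taken from the
// composite literal above; the real type may differ):
type testTieredStorageCloser struct {
	storage   *tiered.TieredStorage
	directory test.TemporaryDirectory
}

func (c *testTieredStorageCloser) Close() {
	c.storage.Close()
	c.directory.Close()
}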
func AppendRepeatingValuesTests(p metric.Persistence, t test.Tester) {
	m := clientmodel.Metric{
		clientmodel.MetricNameLabel: "errors_total",
		"controller":                "foo",
		"operation":                 "bar",
	}

	increments := 10
	repetitions := 500

	for i := 0; i < increments; i++ {
		for j := 0; j < repetitions; j++ {
			time := clientmodel.Timestamp(0).Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
			testAppendSamples(p, &clientmodel.Sample{
				Value:     clientmodel.SampleValue(i),
				Timestamp: time,
				Metric:    m,
			}, t)
		}
	}

	v, ok := p.(metric.View)
	if !ok {
		// It's purely a benchmark for a Persistence that is not viewable.
		return
	}

	matchers := labelMatchersFromLabelSet(clientmodel.LabelSet{
		clientmodel.MetricNameLabel: "errors_total",
		"controller":                "foo",
		"operation":                 "bar",
	})

	for i := 0; i < increments; i++ {
		for j := 0; j < repetitions; j++ {
			fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
			if err != nil {
				t.Fatal(err)
			}
			if len(fingerprints) != 1 {
				t.Fatalf("expected %d fingerprints, got %d", 1, len(fingerprints))
			}

			time := clientmodel.Timestamp(0).Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
			samples := v.GetValueAtTime(fingerprints[0], time)
			if len(samples) == 0 {
				t.Fatal("expected at least one sample.")
			}

			expected := clientmodel.SampleValue(i)
			for _, sample := range samples {
				if sample.Value != expected {
					t.Fatalf("expected %v value, got %v", expected, sample.Value)
				}
			}
		}
	}
}
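// labelMatchersFromLabelSet is called above but not defined in this section.
// A plausible sketch, assuming it simply turns every label pair in the set
// into an equality matcher via the same constructor the matcher tests use:
func labelMatchersFromLabelSet(ls clientmodel.LabelSet) metric.LabelMatchers {
	matchers := make(metric.LabelMatchers, 0, len(ls))
	for name, value := range ls {
		m, err := metric.NewLabelMatcher(metric.Equal, name, value)
		if err != nil {
			panic(err) // Equality matchers should never fail to construct.
		}
		matchers = append(matchers, m)
	}
	return matchers
}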
func AppendRepeatingValuesTests(p MetricPersistence, t test.Tester) {
	metric := model.Metric{
		model.MetricNameLabel: "errors_total",
		"controller":          "foo",
		"operation":           "bar",
	}

	increments := 10
	repetitions := 500

	for i := 0; i < increments; i++ {
		for j := 0; j < repetitions; j++ {
			time := time.Time{}.Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
			testAppendSample(p, model.Sample{
				Value:     model.SampleValue(i),
				Timestamp: time,
				Metric:    metric,
			}, t)
		}
	}

	if true {
		// XXX: Purely a benchmark.
		return
	}

	labelSet := model.LabelSet{
		model.MetricNameLabel: "errors_total",
		"controller":          "foo",
		"operation":           "bar",
	}

	for i := 0; i < increments; i++ {
		for j := 0; j < repetitions; j++ {
			fingerprints, err := p.GetFingerprintsForLabelSet(labelSet)
			if err != nil {
				t.Fatal(err)
			}
			if len(fingerprints) != 1 {
				t.Fatalf("expected %d fingerprints, got %d", 1, len(fingerprints))
			}

			time := time.Time{}.Add(time.Duration(i) * time.Hour).Add(time.Duration(j) * time.Second)
			sample, err := p.GetValueAtTime(fingerprints[0], time, StalenessPolicy{})
			if err != nil {
				t.Fatal(err)
			}
			if sample == nil {
				t.Fatal("expected non-nil sample.")
			}

			expected := model.SampleValue(i)
			if sample.Value != expected {
				t.Fatalf("expected %d value, got %d", expected, sample.Value)
			}
		}
	}
}
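// testAppendSample is relied on by the older suites above but is not defined
// in this section. A minimal stand-in, assuming MetricPersistence exposes a
// single-sample append that returns an error:
func testAppendSample(p MetricPersistence, s model.Sample, t test.Tester) {
	if err := p.AppendSample(s); err != nil {
		t.Fatal(err)
	}
}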
func newTestTieredStorage(t test.Tester) (storage Storage, closer test.Closer) {
	var directory test.TemporaryDirectory
	directory = test.NewTemporaryDirectory("test_tiered_storage", t)

	storage, err := NewTieredStorage(5000000, 2500, 1000, 5*time.Second, 15*time.Second, 0*time.Second, directory.Path())
	if err != nil {
		t.Fatalf("Error creating storage: %s", err)
	}
	if storage == nil {
		t.Fatalf("storage == nil")
	}

	go storage.Serve()

	closer = &testTieredStorageCloser{
		storage:   storage,
		directory: directory,
	}
	return
}
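// Hypothetical smoke test showing how the helper above is meant to be
// consumed by the tests in this file; *testing.T is assumed to satisfy
// test.Tester, as elsewhere in the suite.
func TestTieredStorageLifecycle(t *testing.T) {
	storage, closer := newTestTieredStorage(t)
	defer closer.Close()

	// The same teardown call the view tests issue once they are done.
	storage.Drain()
}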
func GetLabelValuesForLabelNameTests(p metric.Persistence, t test.Tester) {
	testAppendSamples(p, &clientmodel.Sample{
		Value:     0,
		Timestamp: 0,
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: "my_metric",
			"request_type":              "create",
			"result":                    "success",
		},
	}, t)

	testAppendSamples(p, &clientmodel.Sample{
		Value:     0,
		Timestamp: 0,
		Metric: clientmodel.Metric{
			clientmodel.MetricNameLabel: "my_metric",
			"request_type":              "delete",
			"outcome":                   "failure",
		},
	}, t)

	expectedIndex := map[clientmodel.LabelName]clientmodel.LabelValues{
		clientmodel.MetricNameLabel: {"my_metric"},
		"request_type":              {"create", "delete"},
		"result":                    {"success"},
		"outcome":                   {"failure"},
	}

	for name, expected := range expectedIndex {
		actual, err := p.GetLabelValuesForLabelName(name)
		if err != nil {
			t.Fatalf("Error getting values for label %s: %v", name, err)
		}
		if len(actual) != len(expected) {
			t.Fatalf("Number of values don't match for label %s: got %d; want %d", name, len(actual), len(expected))
		}
		for i := range expected {
			if actual[i] != expected[i] {
				t.Fatalf("%d. Got %s; want %s", i, actual[i], expected[i])
			}
		}
	}
}
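// testAppendSamples is used throughout the newer suites but is not defined
// in this section. A minimal stand-in, assuming metric.Persistence accepts a
// batch of samples and returns an error (the exact method name is assumed):
func testAppendSamples(p metric.Persistence, s *clientmodel.Sample, t test.Tester) {
	if err := p.AppendSamples(clientmodel.Samples{s}); err != nil {
		t.Fatal(err)
	}
}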
func GetFingerprintsForLabelSetTests(p metric.Persistence, t test.Tester) {
	metrics := []clientmodel.Metric{
		{clientmodel.MetricNameLabel: "test_metric", "method": "get", "result": "success"},
		{clientmodel.MetricNameLabel: "test_metric", "method": "get", "result": "failure"},
		{clientmodel.MetricNameLabel: "test_metric", "method": "post", "result": "success"},
		{clientmodel.MetricNameLabel: "test_metric", "method": "post", "result": "failure"},
	}

	newTestLabelMatcher := func(matchType metric.MatchType, name clientmodel.LabelName, value clientmodel.LabelValue) *metric.LabelMatcher {
		m, err := metric.NewLabelMatcher(matchType, name, value)
		if err != nil {
			t.Fatalf("Couldn't create label matcher: %v", err)
		}
		return m
	}

	scenarios := []struct {
		in         metric.LabelMatchers
		outIndexes []int
	}{
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
			},
			outIndexes: []int{0, 1, 2, 3},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "non_existent_metric"),
			},
			outIndexes: []int{},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "non_existent_metric"),
				newTestLabelMatcher(metric.Equal, "result", "success"),
			},
			outIndexes: []int{},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
				newTestLabelMatcher(metric.Equal, "result", "success"),
			},
			outIndexes: []int{0, 2},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
				newTestLabelMatcher(metric.NotEqual, "result", "success"),
			},
			outIndexes: []int{1, 3},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
				newTestLabelMatcher(metric.RegexMatch, "result", "foo|success|bar"),
			},
			outIndexes: []int{0, 2},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
				newTestLabelMatcher(metric.RegexNoMatch, "result", "foo|success|bar"),
			},
			outIndexes: []int{1, 3},
		},
		{
			in: metric.LabelMatchers{
				newTestLabelMatcher(metric.Equal, clientmodel.MetricNameLabel, "test_metric"),
				newTestLabelMatcher(metric.RegexNoMatch, "result", "foo|success|bar"),
				newTestLabelMatcher(metric.RegexMatch, "method", "os"),
			},
			outIndexes: []int{3},
		},
	}

	for _, m := range metrics {
		testAppendSamples(p, &clientmodel.Sample{
			Value:     0,
			Timestamp: 0,
			Metric:    m,
		}, t)
	}

	for i, s := range scenarios {
		actualFps, err := p.GetFingerprintsForLabelMatchers(s.in)
		if err != nil {
			t.Fatalf("%d. Couldn't get fingerprints for label matchers: %v", i, err)
		}

		expectedFps := clientmodel.Fingerprints{}
		for _, i := range s.outIndexes {
			fp := &clientmodel.Fingerprint{}
			fp.LoadFromMetric(metrics[i])
			expectedFps = append(expectedFps, fp)
		}

		sort.Sort(actualFps)
		sort.Sort(expectedFps)

		if len(actualFps) != len(expectedFps) {
			t.Fatalf("%d. Got %d fingerprints; want %d", i, len(actualFps), len(expectedFps))
		}

		for j, actualFp := range actualFps {
			if !actualFp.Equal(expectedFps[j]) {
				t.Fatalf("%d.%d. Got fingerprint %v; want %v", i, j, actualFp, expectedFps[j])
			}
		}
	}
}
func testBuilder(t test.Tester) {
	type atTime struct {
		fingerprint string
		time        time.Time
	}

	type atInterval struct {
		fingerprint string
		from        time.Time
		through     time.Time
		interval    time.Duration
	}

	type atRange struct {
		fingerprint string
		from        time.Time
		through     time.Time
	}

	type in struct {
		atTimes     []atTime
		atIntervals []atInterval
		atRanges    []atRange
	}

	type out []struct {
		fingerprint string
		operations  ops
	}

	var scenarios = []struct {
		in  in
		out out
	}{
		// Ensure that the fingerprint is sorted in proper order.
		{
			in: in{
				atTimes: []atTime{
					{fingerprint: "0000000000000001111-a-4-a", time: time.Unix(100, 0)},
					{fingerprint: "0000000000000000000-a-4-a", time: time.Unix(100, 0)},
				},
			},
			out: out{
				{fingerprint: "00000000000000000000-a-4-a"},
				{fingerprint: "00000000000000001111-a-4-a"},
			},
		},
		// Ensure that the fingerprint-timestamp pairs are sorted in proper order.
		{
			in: in{
				atTimes: []atTime{
					{fingerprint: "1111-a-4-a", time: time.Unix(100, 0)},
					{fingerprint: "1111-a-4-a", time: time.Unix(200, 0)},
					{fingerprint: "0-a-4-a", time: time.Unix(100, 0)},
					{fingerprint: "0-a-4-a", time: time.Unix(0, 0)},
				},
			},
			out: out{
				{fingerprint: "00000000000000000000-a-4-a"},
				{fingerprint: "00000000000000001111-a-4-a"},
			},
		},
		// Ensure grouping of operations.
		{
			in: in{
				atTimes: []atTime{
					{fingerprint: "1111-a-4-a", time: time.Unix(100, 0)},
				},
				atRanges: []atRange{
					{fingerprint: "1111-a-4-a", from: time.Unix(100, 0), through: time.Unix(1000, 0)},
					{fingerprint: "1111-a-4-a", from: time.Unix(100, 0), through: time.Unix(9000, 0)},
				},
			},
			out: out{
				{fingerprint: "00000000000000001111-a-4-a"},
			},
		},
	}

	for i, scenario := range scenarios {
		builder := viewRequestBuilder{
			operations: map[model.Fingerprint]ops{},
		}

		for _, atTime := range scenario.in.atTimes {
			fingerprint := model.NewFingerprintFromRowKey(atTime.fingerprint)
			builder.GetMetricAtTime(fingerprint, atTime.time)
		}

		for _, atInterval := range scenario.in.atIntervals {
			fingerprint := model.NewFingerprintFromRowKey(atInterval.fingerprint)
			builder.GetMetricAtInterval(fingerprint, atInterval.from, atInterval.through, atInterval.interval)
		}

		for _, atRange := range scenario.in.atRanges {
			fingerprint := model.NewFingerprintFromRowKey(atRange.fingerprint)
			builder.GetMetricRange(fingerprint, atRange.from, atRange.through)
		}

		jobs := builder.ScanJobs()

		if len(scenario.out) != len(jobs) {
			t.Fatalf("%d. expected job length of %d, got %d\n", i, len(scenario.out), len(jobs))
		}

		for j, job := range scenario.out {
			if jobs[j].fingerprint.ToRowKey() != job.fingerprint {
				t.Fatalf("%d.%d. expected fingerprint %s, got %s\n", i, j, job.fingerprint, jobs[j].fingerprint.ToRowKey())
			}
		}
	}
}
func GetBoundaryValuesTests(persistenceMaker func() (MetricPersistence, test.Closer), t test.Tester) { type value struct { year int month time.Month day int hour int value float32 } type input struct { openYear int openMonth time.Month openDay int openHour int endYear int endMonth time.Month endDay int endHour int staleness time.Duration } type output struct { open model.SampleValue end model.SampleValue } type behavior struct { name string input input output *output } var contexts = []struct { name string values []value behaviors []behavior }{ { name: "no values", values: []value{}, behaviors: []behavior{ { name: "non-existent interval without staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, }, { name: "non-existent interval with staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(365*24) * time.Hour, }, }, }, }, { name: "single value", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, behaviors: []behavior{ { name: "on start but missing end without staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, }, { name: "non-existent interval after within staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 31, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(4380) * time.Hour, }, }, { name: "non-existent interval after without staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 31, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, }, { name: "non-existent interval before with staleness policy", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 29, endHour: 0, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "non-existent interval before without staleness policy", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 29, endHour: 0, staleness: time.Duration(0), }, }, { name: "on end but not start without staleness policy", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, }, { name: "on end but not start without staleness policy", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "before point without staleness policy", input: input{ openYear: 1982, openMonth: 3, openDay: 30, openHour: 0, endYear: 1983, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, }, { name: "before point with staleness policy", input: input{ openYear: 1982, openMonth: 3, openDay: 30, openHour: 0, endYear: 1983, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "after point without staleness policy", input: input{ openYear: 1985, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, }, { name: "after point with staleness policy", input: input{ openYear: 1985, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, 
endHour: 0, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "spanning point without staleness policy", input: input{ openYear: 1983, openMonth: 9, openDay: 29, openHour: 12, endYear: 1984, endMonth: 9, endDay: 28, endHour: 12, staleness: time.Duration(0), }, }, { name: "spanning point with staleness policy", input: input{ openYear: 1983, openMonth: 9, openDay: 29, openHour: 12, endYear: 1984, endMonth: 9, endDay: 28, endHour: 12, staleness: time.Duration(365*24) * time.Hour, }, }, }, }, { name: "double values", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, behaviors: []behavior{ { name: "on points without staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(0), }, output: &output{ open: 0, end: 1, }, }, { name: "on points with staleness policy", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ open: 0, end: 1, }, }, { name: "on first before second outside of staleness", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 6, endDay: 29, endHour: 6, staleness: time.Duration(2190) * time.Hour, }, }, { name: "on first before second within staleness", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 6, endDay: 29, endHour: 6, staleness: time.Duration(356*24) * time.Hour, }, }, { name: "on first after second outside of staleness", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 6, endDay: 29, endHour: 6, staleness: time.Duration(1) * time.Hour, }, }, { name: "on first after second within staleness", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 6, endDay: 29, endHour: 6, staleness: time.Duration(356*24) * time.Hour, }, output: &output{ open: 0, end: 1, }, }, }, }, } for i, context := range contexts { // Wrapping in function to enable garbage collection of resources. func() { p, closer := persistenceMaker() defer closer.Close() defer p.Close() m := model.Metric{ model.MetricNameLabel: "age_in_years", } for _, value := range context.values { testAppendSample(p, model.Sample{ Value: model.SampleValue(value.value), Timestamp: time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC), Metric: m, }, t) } for j, behavior := range context.behaviors { input := behavior.input open := time.Date(input.openYear, input.openMonth, input.openDay, input.openHour, 0, 0, 0, time.UTC) end := time.Date(input.endYear, input.endMonth, input.endDay, input.endHour, 0, 0, 0, time.UTC) interval := model.Interval{ OldestInclusive: open, NewestInclusive: end, } po := StalenessPolicy{ DeltaAllowance: input.staleness, } openValue, endValue, err := p.GetBoundaryValues(model.NewFingerprintFromMetric(m), interval, po) if err != nil { t.Fatalf("%d.%d(%s). Could not query for value: %q\n", i, j, behavior.name, err) } if behavior.output == nil { if openValue != nil { t.Fatalf("%d.%d(%s). Expected open to be nil but got: %q\n", i, j, behavior.name, openValue) } if endValue != nil { t.Fatalf("%d.%d(%s). Expected end to be nil but got: %q\n", i, j, behavior.name, endValue) } } else { if openValue == nil { t.Fatalf("%d.%d(%s). 
Expected open to be %s but got nil\n", i, j, behavior.name, behavior.output) } if endValue == nil { t.Fatalf("%d.%d(%s). Expected end to be %s but got nil\n", i, j, behavior.name, behavior.output) } if openValue.Value != behavior.output.open { t.Fatalf("%d.%d(%s). Expected open to be %s but got %s\n", i, j, behavior.name, behavior.output.open, openValue.Value) } if endValue.Value != behavior.output.end { t.Fatalf("%d.%d(%s). Expected end to be %s but got %s\n", i, j, behavior.name, behavior.output.end, endValue.Value) } } } }() } }
func testOptimize(t test.Tester) { var ( out ops scenarios = []struct { in ops out ops }{ // Empty set; return empty set. { in: ops{}, out: ops{}, }, // Single time; return single time. { in: ops{ &getValuesAtTimeOp{ time: testInstant, }, }, out: ops{ &getValuesAtTimeOp{ time: testInstant, }, }, }, // Single range; return single range. { in: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), }, }, }, // Single interval; return single interval. { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, }, out: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, }, }, // Duplicate points; return single point. { in: ops{ &getValuesAtTimeOp{ time: testInstant, }, &getValuesAtTimeOp{ time: testInstant, }, }, out: ops{ &getValuesAtTimeOp{ time: testInstant, }, }, }, // Duplicate ranges; return single range. { in: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), }, }, }, // Duplicate intervals; return single interval. { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, }, out: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, }, }, // Subordinate interval; return master. { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 5, }, }, out: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 5, }, }, }, // Subordinate range; return master. { in: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), }, }, }, // Equal range with different interval; return both. { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 10, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, }, out: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 10, }, }, }, // Different range with different interval; return best. 
{ in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 10, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 10, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 5, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 5, }, }, out: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 5, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 10, }, }, }, // Include Truncated Intervals with Range. { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 10, }, &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(1 * time.Minute), interval: time.Second * 10, }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(30 * time.Second), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(30 * time.Second), }, &getValuesAtIntervalOp{ from: testInstant.Add(30 * time.Second), through: testInstant.Add(2 * time.Minute), interval: time.Second * 10, }, }, }, // Compacted Forward Truncation { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(3 * time.Minute), interval: time.Second * 10, }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), }, &getValuesAtIntervalOp{ from: testInstant.Add(2 * time.Minute), through: testInstant.Add(3 * time.Minute), interval: time.Second * 10, }, }, }, // Compacted Tail Truncation { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(3 * time.Minute), interval: time.Second * 10, }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), }, &getValuesAtIntervalOp{ from: testInstant.Add(2 * time.Minute), through: testInstant.Add(3 * time.Minute), interval: time.Second * 10, }, }, }, // Compact Interval with Subservient Range { in: ops{ &getValuesAtIntervalOp{ from: testInstant.Add(1 * time.Minute), through: testInstant.Add(2 * time.Minute), interval: time.Second * 10, }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(3 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(3 * time.Minute), }, }, }, // Compact Ranges with Subservient Range { in: ops{ &getValuesAlongRangeOp{ from: testInstant.Add(1 * time.Minute), through: testInstant.Add(2 * time.Minute), }, &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(3 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(3 * time.Minute), }, }, }, // Carving Middle Elements { in: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(5 * time.Minute), interval: time.Second * 10, }, &getValuesAlongRangeOp{ from: testInstant.Add(2 * time.Minute), through: testInstant.Add(3 * time.Minute), }, }, out: ops{ &getValuesAtIntervalOp{ from: testInstant, through: testInstant.Add(2 * time.Minute), interval: time.Second * 10, }, &getValuesAlongRangeOp{ from: testInstant.Add(2 * time.Minute), 
through: testInstant.Add(3 * time.Minute), }, &getValuesAtIntervalOp{ // Since the range operation consumes Now() + 3 Minutes, we start // an additional ten seconds later. from: testInstant.Add(3 * time.Minute).Add(10 * time.Second), through: testInstant.Add(5 * time.Minute), interval: time.Second * 10, }, }, }, // Compact Subservient Points with Range // The points are at half-minute offsets due to optimizeTimeGroups // work. { in: ops{ &getValuesAtTimeOp{ time: testInstant.Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(1 * time.Minute).Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(2 * time.Minute).Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(3 * time.Minute).Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(4 * time.Minute).Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(5 * time.Minute).Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(6 * time.Minute).Add(30 * time.Second), }, &getValuesAlongRangeOp{ from: testInstant.Add(1 * time.Minute), through: testInstant.Add(5 * time.Minute), }, }, out: ops{ &getValuesAtTimeOp{ time: testInstant.Add(30 * time.Second), }, &getValuesAlongRangeOp{ from: testInstant.Add(1 * time.Minute), through: testInstant.Add(5 * time.Minute), }, &getValuesAtTimeOp{ time: testInstant.Add(5 * time.Minute).Add(30 * time.Second), }, &getValuesAtTimeOp{ time: testInstant.Add(6 * time.Minute).Add(30 * time.Second), }, }, }, // Regression Validation 1: Multiple Overlapping Interval Requests // We expect to find compaction. { in: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(5 * time.Minute), }, &getValuesAlongRangeOp{ from: testInstant.Add(15 * time.Second), through: testInstant.Add(15 * time.Second).Add(5 * time.Minute), }, &getValuesAlongRangeOp{ from: testInstant.Add(30 * time.Second), through: testInstant.Add(30 * time.Second).Add(5 * time.Minute), }, &getValuesAlongRangeOp{ from: testInstant.Add(45 * time.Second), through: testInstant.Add(45 * time.Second).Add(5 * time.Minute), }, }, out: ops{ &getValuesAlongRangeOp{ from: testInstant, through: testInstant.Add(45 * time.Second).Add(5 * time.Minute), }, }, }, } ) for i, scenario := range scenarios { // The compaction system assumes that values are sorted on input. sort.Sort(startsAtSort{scenario.in}) out = optimize(scenario.in) if len(out) != len(scenario.out) { t.Fatalf("%d. expected length of %d, got %d", i, len(scenario.out), len(out)) } for j, op := range out { if actual, ok := op.(*getValuesAtTimeOp); ok { if expected, ok := scenario.out[j].(*getValuesAtTimeOp); ok { if expected.time.Unix() != actual.time.Unix() { t.Fatalf("%d.%d. expected time %s, got %s", i, j, expected.time, actual.time) } } else { t.Fatalf("%d.%d. expected getValuesAtTimeOp, got %s", i, j, actual) } } else if actual, ok := op.(*getValuesAtIntervalOp); ok { if expected, ok := scenario.out[j].(*getValuesAtIntervalOp); ok { // Shaving off nanoseconds. if expected.from.Unix() != actual.from.Unix() { t.Fatalf("%d.%d. expected from %s, got %s", i, j, expected.from, actual.from) } if expected.through.Unix() != actual.through.Unix() { t.Fatalf("%d.%d. expected through %s, got %s", i, j, expected.through, actual.through) } if expected.interval != (actual.interval) { t.Fatalf("%d.%d. expected interval %s, got %s", i, j, expected.interval, actual.interval) } } else { t.Fatalf("%d.%d. 
expected getValuesAtIntervalOp, got %s", i, j, actual) } } else if actual, ok := op.(*getValuesAlongRangeOp); ok { if expected, ok := scenario.out[j].(*getValuesAlongRangeOp); ok { if expected.from.Unix() != actual.from.Unix() { t.Fatalf("%d.%d. expected from %s, got %s", i, j, expected.from, actual.from) } if expected.through.Unix() != actual.through.Unix() { t.Fatalf("%d.%d. expected through %s, got %s", i, j, expected.through, actual.through) } } else { t.Fatalf("%d.%d. expected getValuesAlongRangeOp, got %s", i, j, actual) } } } } }
func GetRangeValuesTests(persistenceMaker func() (MetricPersistence, test.Closer), t test.Tester) { type value struct { year int month time.Month day int hour int value float32 } type input struct { openYear int openMonth time.Month openDay int openHour int endYear int endMonth time.Month endDay int endHour int } type output struct { year int month time.Month day int hour int value float32 } type behavior struct { name string input input output []output } var contexts = []struct { name string values []value behaviors []behavior }{ { name: "no values", values: []value{}, behaviors: []behavior{ { name: "non-existent interval", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, }, }, }, { name: "singleton value", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, behaviors: []behavior{ { name: "start on first value", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "end on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "overlap on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, }, }, { name: "two values", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, behaviors: []behavior{ { name: "start on first value", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "start on second value", input: input{ openYear: 1985, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "end on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "end on second value", input: input{ openYear: 1985, openMonth: 1, openDay: 1, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "overlap on values", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, }, }, } for i, context := range contexts { // Wrapping in function to enable garbage collection of resources. 
func() { p, closer := persistenceMaker() defer closer.Close() defer p.Close() m := model.Metric{ model.MetricNameLabel: "age_in_years", } for _, value := range context.values { testAppendSample(p, model.Sample{ Value: model.SampleValue(value.value), Timestamp: time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC), Metric: m, }, t) } for j, behavior := range context.behaviors { input := behavior.input open := time.Date(input.openYear, input.openMonth, input.openDay, input.openHour, 0, 0, 0, time.UTC) end := time.Date(input.endYear, input.endMonth, input.endDay, input.endHour, 0, 0, 0, time.UTC) in := model.Interval{ OldestInclusive: open, NewestInclusive: end, } values, err := p.GetRangeValues(model.NewFingerprintFromMetric(m), in) if err != nil { t.Fatalf("%d.%d(%s). Could not query for value: %q\n", i, j, behavior.name, err) } if values == nil && len(behavior.output) != 0 { t.Fatalf("%d.%d(%s). Expected %s but got: %s\n", i, j, behavior.name, behavior.output, values) } if behavior.output == nil { if values != nil { t.Fatalf("%d.%d(%s). Expected nil values but got: %s\n", i, j, behavior.name, values) } } else { if len(behavior.output) != len(values.Values) { t.Fatalf("%d.%d(%s). Expected length %d but got: %d\n", i, j, behavior.name, len(behavior.output), len(values.Values)) } for k, actual := range values.Values { expected := behavior.output[k] if actual.Value != model.SampleValue(expected.value) { t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.value, actual.Value) } if actual.Timestamp.Year() != expected.year { t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.year, actual.Timestamp.Year()) } if actual.Timestamp.Month() != expected.month { t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.month, actual.Timestamp.Month()) } // XXX: Find problem here. // Mismatches occur in this and have for a long time in the LevelDB // case, however not im-memory. // // if actual.Timestamp.Day() != expected.day { // t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.day, actual.Timestamp.Day()) // } // if actual.Timestamp.Hour() != expected.hour { // t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.hour, actual.Timestamp.Hour()) // } } } } }() } }
func testMakeView(t test.Tester) { type in struct { atTime []getValuesAtTimeOp atInterval []getValuesAtIntervalOp alongRange []getValuesAlongRangeOp } type out struct { atTime [][]model.SamplePair atInterval [][]model.SamplePair alongRange [][]model.SamplePair } var ( instant = time.Date(1984, 3, 30, 0, 0, 0, 0, time.Local) metric = model.Metric{model.MetricNameLabel: "request_count"} fingerprint = model.NewFingerprintFromMetric(metric) scenarios = []struct { data []model.Sample in in out out }{ // No sample, but query asks for one. { in: in{ atTime: []getValuesAtTimeOp{ { time: instant, }, }, }, out: out{ atTime: [][]model.SamplePair{{}}, }, }, // Single sample, query asks for exact sample time. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant, }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant, }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Single sample, query time before the sample. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant.Add(time.Second), }, { Metric: metric, Value: 1, Timestamp: instant.Add(time.Second * 2), }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant, }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant.Add(time.Second), Value: 0, }, }, }, }, }, // Single sample, query time after the sample. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant, }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant.Add(time.Second), }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Two samples, query asks for first sample time. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant, }, { Metric: metric, Value: 1, Timestamp: instant.Add(time.Second), }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant, }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Three samples, query asks for second sample time. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant, }, { Metric: metric, Value: 1, Timestamp: instant.Add(time.Second), }, { Metric: metric, Value: 2, Timestamp: instant.Add(time.Second * 2), }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant.Add(time.Second), }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant.Add(time.Second), Value: 1, }, }, }, }, }, // Three samples, query asks for time between first and second samples. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant, }, { Metric: metric, Value: 1, Timestamp: instant.Add(time.Second * 2), }, { Metric: metric, Value: 2, Timestamp: instant.Add(time.Second * 4), }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant.Add(time.Second), }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant, Value: 0, }, { Timestamp: instant.Add(time.Second * 2), Value: 1, }, }, }, }, }, // Three samples, query asks for time between second and third samples. { data: []model.Sample{ { Metric: metric, Value: 0, Timestamp: instant, }, { Metric: metric, Value: 1, Timestamp: instant.Add(time.Second * 2), }, { Metric: metric, Value: 2, Timestamp: instant.Add(time.Second * 4), }, }, in: in{ atTime: []getValuesAtTimeOp{ { time: instant.Add(time.Second * 3), }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant.Add(time.Second * 2), Value: 1, }, { Timestamp: instant.Add(time.Second * 4), Value: 2, }, }, }, }, }, // Two chunks of samples, query asks for values from first chunk. 
{ data: buildSamples(instant, instant.Add(time.Duration(*leveldbChunkSize*2)*time.Second), time.Second, metric), in: in{ atTime: []getValuesAtTimeOp{ { time: instant.Add(time.Second*time.Duration(*leveldbChunkSize/2) + 1), }, }, }, out: out{ atTime: [][]model.SamplePair{ { { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize/2)), Value: 100, }, { Timestamp: instant.Add(time.Second * (time.Duration(*leveldbChunkSize/2) + 1)), Value: 101, }, }, }, }, }, } ) for i, scenario := range scenarios { tiered, closer := newTestTieredStorage(t) defer closer.Close() for j, datum := range scenario.data { err := tiered.AppendSample(datum) if err != nil { t.Fatalf("%d.%d. failed to add fixture data: %s", i, j, err) } } tiered.Flush() requestBuilder := NewViewRequestBuilder() for _, atTime := range scenario.in.atTime { requestBuilder.GetMetricAtTime(fingerprint, atTime.time) } for _, atInterval := range scenario.in.atInterval { requestBuilder.GetMetricAtInterval(fingerprint, atInterval.from, atInterval.through, atInterval.interval) } for _, alongRange := range scenario.in.alongRange { requestBuilder.GetMetricRange(fingerprint, alongRange.from, alongRange.through) } v, err := tiered.MakeView(requestBuilder, time.Second*5) if err != nil { t.Fatalf("%d. failed due to %s", i, err) } for j, atTime := range scenario.in.atTime { actual := v.GetValueAtTime(fingerprint, atTime.time) if len(actual) != len(scenario.out.atTime[j]) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(scenario.out.atTime[j]), len(actual)) } for k, value := range scenario.out.atTime[j] { if value.Value != actual[k].Value { t.Fatalf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Fatalf("%d.%d.%d expected %s timestamp, got %s", i, j, k, value.Timestamp, actual[k].Timestamp) } } } tiered.Drain() } }
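// buildSamples is used by testMakeView above but is not defined in this
// section. A hypothetical sketch, assuming it emits one sample per interval
// step with the value counting up from zero (which matches the expected
// values of 100 and 101 around the chunk boundary in the last scenario):
func buildSamples(from, to time.Time, interval time.Duration, m model.Metric) (v []model.Sample) {
	i := model.SampleValue(0)
	for from.Before(to) {
		v = append(v, model.Sample{
			Metric:    m,
			Value:     i,
			Timestamp: from,
		})
		from = from.Add(interval)
		i++
	}
	return
}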
func GetRangeValuesTests(persistenceMaker func() (metric.ViewablePersistence, test.Closer), onlyBoundaries bool, t test.Tester) { type value struct { year int month time.Month day int hour int value clientmodel.SampleValue } type input struct { openYear int openMonth time.Month openDay int openHour int endYear int endMonth time.Month endDay int endHour int } type output struct { year int month time.Month day int hour int value clientmodel.SampleValue } type behavior struct { name string input input output []output } var contexts = []struct { name string values []value behaviors []behavior }{ { name: "no values", values: []value{}, behaviors: []behavior{ { name: "non-existent interval", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, }, }, }, { name: "singleton value", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, behaviors: []behavior{ { name: "start on first value", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "end on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "overlap on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, }, }, { name: "two values", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, behaviors: []behavior{ { name: "start on first value", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "start on second value", input: input{ openYear: 1985, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "end on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "end on second value", input: input{ openYear: 1985, openMonth: 1, openDay: 1, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "overlap on values", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, }, }, { name: "three values", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, { year: 1986, month: 3, day: 30, hour: 0, value: 2, }, }, behaviors: []behavior{ { name: "start on first value", input: input{ openYear: 1984, openMonth: 3, openDay: 30, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, 
month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "start on second value", input: input{ openYear: 1985, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, { year: 1986, month: 3, day: 30, hour: 0, value: 2, }, }, }, { name: "end on first value", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1984, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, }, { name: "end on second value", input: input{ openYear: 1985, openMonth: 1, openDay: 1, openHour: 0, endYear: 1985, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, }, { name: "overlap on values", input: input{ openYear: 1983, openMonth: 3, openDay: 30, openHour: 0, endYear: 1986, endMonth: 3, endDay: 30, endHour: 0, }, output: []output{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, { year: 1986, month: 3, day: 30, hour: 0, value: 2, }, }, }, }, }, } for i, context := range contexts { // Wrapping in function to enable garbage collection of resources. func() { p, closer := persistenceMaker() defer closer.Close() defer p.Close() m := clientmodel.Metric{ clientmodel.MetricNameLabel: "age_in_years", } for _, value := range context.values { testAppendSamples(p, &clientmodel.Sample{ Value: clientmodel.SampleValue(value.value), Timestamp: clientmodel.TimestampFromTime(time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC)), Metric: m, }, t) } for j, behavior := range context.behaviors { input := behavior.input open := clientmodel.TimestampFromTime(time.Date(input.openYear, input.openMonth, input.openDay, input.openHour, 0, 0, 0, time.UTC)) end := clientmodel.TimestampFromTime(time.Date(input.endYear, input.endMonth, input.endDay, input.endHour, 0, 0, 0, time.UTC)) in := metric.Interval{ OldestInclusive: open, NewestInclusive: end, } actualValues := metric.Values{} expectedValues := []output{} fp := &clientmodel.Fingerprint{} fp.LoadFromMetric(m) if onlyBoundaries { actualValues = p.GetBoundaryValues(fp, in) l := len(behavior.output) if l == 1 { expectedValues = behavior.output[0:1] } if l > 1 { expectedValues = append(behavior.output[0:1], behavior.output[l-1]) } } else { actualValues = p.GetRangeValues(fp, in) expectedValues = behavior.output } if actualValues == nil && len(expectedValues) != 0 { t.Fatalf("%d.%d(%s). Expected %v but got: %v\n", i, j, behavior.name, expectedValues, actualValues) } if expectedValues == nil { if actualValues != nil { t.Fatalf("%d.%d(%s). Expected nil values but got: %s\n", i, j, behavior.name, actualValues) } } else { if len(expectedValues) != len(actualValues) { t.Fatalf("%d.%d(%s). Expected length %d but got: %d\n", i, j, behavior.name, len(expectedValues), len(actualValues)) } for k, actual := range actualValues { expected := expectedValues[k] if actual.Value != clientmodel.SampleValue(expected.value) { t.Fatalf("%d.%d.%d(%s). Expected %v but got: %v\n", i, j, k, behavior.name, expected.value, actual.Value) } if actual.Timestamp.Time().Year() != expected.year { t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.year, actual.Timestamp.Time().Year()) } if actual.Timestamp.Time().Month() != expected.month { t.Fatalf("%d.%d.%d(%s). 
Expected %d but got: %d\n", i, j, k, behavior.name, expected.month, actual.Timestamp.Time().Month()) } // XXX: Find problem here. // Mismatches occur in this and have for a long time in the LevelDB // case, however not im-memory. // // if actual.Timestamp.Day() != expected.day { // t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.day, actual.Timestamp.Day()) // } // if actual.Timestamp.Hour() != expected.hour { // t.Fatalf("%d.%d.%d(%s). Expected %d but got: %d\n", i, j, k, behavior.name, expected.hour, actual.Timestamp.Hour()) // } } } } }() } }
func GetValueAtTimeTests(persistenceMaker func() (metric.ViewablePersistence, test.Closer), t test.Tester) { type value struct { year int month time.Month day int hour int value clientmodel.SampleValue } type input struct { year int month time.Month day int hour int } type output []clientmodel.SampleValue type behavior struct { name string input input output output } var contexts = []struct { name string values []value behaviors []behavior }{ { name: "no values", values: []value{}, behaviors: []behavior{ { name: "random target", input: input{ year: 1984, month: 3, day: 30, hour: 0, }, }, }, }, { name: "singleton", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, behaviors: []behavior{ { name: "exact", input: input{ year: 1984, month: 3, day: 30, hour: 0, }, output: output{ 0, }, }, { name: "before", input: input{ year: 1984, month: 3, day: 29, hour: 0, }, output: output{ 0, }, }, { name: "after", input: input{ year: 1984, month: 3, day: 31, hour: 0, }, output: output{ 0, }, }, }, }, { name: "double", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, behaviors: []behavior{ { name: "exact first", input: input{ year: 1984, month: 3, day: 30, hour: 0, }, output: output{ 0, }, }, { name: "exact second", input: input{ year: 1985, month: 3, day: 30, hour: 0, }, output: output{ 1, }, }, { name: "before first", input: input{ year: 1983, month: 9, day: 29, hour: 12, }, output: output{ 0, }, }, { name: "after second", input: input{ year: 1985, month: 9, day: 28, hour: 12, }, output: output{ 1, }, }, { name: "middle", input: input{ year: 1984, month: 9, day: 28, hour: 12, }, output: output{ 0, 1, }, }, }, }, { name: "triple", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, { year: 1986, month: 3, day: 30, hour: 0, value: 2, }, }, behaviors: []behavior{ { name: "exact first", input: input{ year: 1984, month: 3, day: 30, hour: 0, }, output: output{ 0, }, }, { name: "exact second", input: input{ year: 1985, month: 3, day: 30, hour: 0, }, output: output{ 1, }, }, { name: "exact third", input: input{ year: 1986, month: 3, day: 30, hour: 0, }, output: output{ 2, }, }, { name: "before first", input: input{ year: 1983, month: 9, day: 29, hour: 12, }, output: output{ 0, }, }, { name: "after third", input: input{ year: 1986, month: 9, day: 28, hour: 12, }, output: output{ 2, }, }, { name: "first middle", input: input{ year: 1984, month: 9, day: 28, hour: 12, }, output: output{ 0, 1, }, }, { name: "second middle", input: input{ year: 1985, month: 9, day: 28, hour: 12, }, output: output{ 1, 2, }, }, }, }, } for i, context := range contexts { // Wrapping in function to enable garbage collection of resources. 
func() { p, closer := persistenceMaker() defer closer.Close() defer p.Close() m := clientmodel.Metric{ clientmodel.MetricNameLabel: "age_in_years", } for _, value := range context.values { testAppendSamples(p, &clientmodel.Sample{ Value: clientmodel.SampleValue(value.value), Timestamp: clientmodel.TimestampFromTime(time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC)), Metric: m, }, t) } for j, behavior := range context.behaviors { input := behavior.input time := clientmodel.TimestampFromTime(time.Date(input.year, input.month, input.day, input.hour, 0, 0, 0, time.UTC)) fingerprint := &clientmodel.Fingerprint{} fingerprint.LoadFromMetric(m) actual := p.GetValueAtTime(fingerprint, time) if len(behavior.output) != len(actual) { t.Fatalf("%d.%d(%s.%s). Expected %d samples but got: %v\n", i, j, context.name, behavior.name, len(behavior.output), actual) } for k, samplePair := range actual { if samplePair.Value != behavior.output[k] { t.Fatalf("%d.%d.%d(%s.%s). Expected %s but got %s\n", i, j, k, context.name, behavior.name, behavior.output[k], samplePair) } } } }() } }
func testHealthScheduler(t test.Tester) {
	now := time.Now()

	var scenarios = []struct {
		futureHealthState []TargetState
		preloadedTimes    []time.Time
		expectedSchedule  []time.Time
	}{
		// The behavior discussed in healthScheduler.Reschedule should be read
		// fully to understand the whys and wherefores.
		{
			futureHealthState: []TargetState{UNKNOWN, ALIVE, ALIVE},
			preloadedTimes:    []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)},
			expectedSchedule:  []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)},
		},
		{
			futureHealthState: []TargetState{UNKNOWN, UNREACHABLE, UNREACHABLE},
			preloadedTimes:    []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)},
			expectedSchedule:  []time.Time{now, now.Add(time.Second * 2), now.Add(time.Minute).Add(time.Second * 4)},
		},
		{
			futureHealthState: []TargetState{UNKNOWN, UNREACHABLE, ALIVE},
			preloadedTimes:    []time.Time{now, now.Add(time.Minute), now.Add(time.Minute * 2)},
			expectedSchedule:  []time.Time{now, now.Add(time.Second * 2), now.Add(time.Minute * 2)},
		},
		{
			futureHealthState: []TargetState{
				UNKNOWN, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE,
				UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE, UNREACHABLE,
			},
			preloadedTimes: []time.Time{
				now, now.Add(time.Minute), now.Add(time.Minute * 2), now.Add(time.Minute * 3),
				now.Add(time.Minute * 4), now.Add(time.Minute * 5), now.Add(time.Minute * 6),
				now.Add(time.Minute * 7), now.Add(time.Minute * 8), now.Add(time.Minute * 9),
				now.Add(time.Minute * 10), now.Add(time.Minute * 11), now.Add(time.Minute * 12),
			},
			expectedSchedule: []time.Time{
				now, now.Add(time.Second * 2), now.Add(time.Minute * 1).Add(time.Second * 4),
				now.Add(time.Minute * 2).Add(time.Second * 8), now.Add(time.Minute * 3).Add(time.Second * 16),
				now.Add(time.Minute * 4).Add(time.Second * 32), now.Add(time.Minute * 5).Add(time.Second * 64),
				now.Add(time.Minute * 6).Add(time.Second * 128), now.Add(time.Minute * 7).Add(time.Second * 256),
				now.Add(time.Minute * 8).Add(time.Second * 512), now.Add(time.Minute * 9).Add(time.Second * 1024),
				now.Add(time.Minute * 10).Add(time.Minute * 30), now.Add(time.Minute * 11).Add(time.Minute * 30),
			},
		},
	}

	for i, scenario := range scenarios {
		provider := test.NewInstantProvider(scenario.preloadedTimes)
		reporter := fakeHealthReporter{}
		for _, state := range scenario.futureHealthState {
			reporter.stateQueue = append(reporter.stateQueue, state)
		}
		if len(scenario.preloadedTimes) != len(scenario.futureHealthState) || len(scenario.futureHealthState) != len(scenario.expectedSchedule) {
			t.Fatalf("%d. times and health reports and next time lengths were not equal.", i)
		}

		time := utility.Time{
			Provider: provider,
		}
		scheduler := healthScheduler{
			time:         time,
			target:       reporter,
			scheduledFor: now,
		}

		for j := 0; j < len(scenario.preloadedTimes); j++ {
			futureState := scenario.futureHealthState[j]
			scheduler.Reschedule(scenario.preloadedTimes[j], futureState)
			nextSchedule := scheduler.ScheduledFor()
			if nextSchedule != scenario.expectedSchedule[j] {
				t.Errorf("%d.%d. Expected to be scheduled to %s, got %s", i, j, scenario.expectedSchedule[j], nextSchedule)
			}
		}
	}
}
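// The expected schedules in the table above grow by 2s, 4s, 8s, ... per
// consecutive UNREACHABLE report and settle at 30 minutes, which is
// consistent with a doubling backoff under a 30-minute ceiling. A minimal
// sketch of such a policy (the function name, signature, and constants are
// hypothetical, not the scheduler's actual API):
func backoffDelay(consecutiveUnreachable uint) time.Duration {
	const (
		base     = 2 * time.Second
		maxDelay = 30 * time.Minute
	)
	delay := base << consecutiveUnreachable // 2s, 4s, 8s, ...
	if delay <= 0 || delay > maxDelay {     // Guard shift overflow and apply the ceiling.
		return maxDelay
	}
	return delay
}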
func GetValueAtTimeTests(persistenceMaker func() (MetricPersistence, test.Closer), t test.Tester) { type value struct { year int month time.Month day int hour int value float32 } type input struct { year int month time.Month day int hour int staleness time.Duration } type output struct { value model.SampleValue } type behavior struct { name string input input output *output } var contexts = []struct { name string values []value behaviors []behavior }{ { name: "no values", values: []value{}, behaviors: []behavior{ { name: "random target", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, }, }, }, { name: "singleton", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, }, behaviors: []behavior{ { name: "exact without staleness policy", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, output: &output{ value: 0, }, }, { name: "exact with staleness policy", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 0, }, }, { name: "before without staleness policy", input: input{ year: 1984, month: 3, day: 29, hour: 0, staleness: time.Duration(0), }, }, { name: "before within staleness policy", input: input{ year: 1984, month: 3, day: 29, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "before outside staleness policy", input: input{ year: 1984, month: 3, day: 29, hour: 0, staleness: time.Duration(1) * time.Hour, }, }, { name: "after without staleness policy", input: input{ year: 1984, month: 3, day: 31, hour: 0, staleness: time.Duration(0), }, }, { name: "after within staleness policy", input: input{ year: 1984, month: 3, day: 31, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 0, }, }, { name: "after outside staleness policy", input: input{ year: 1984, month: 4, day: 7, hour: 0, staleness: time.Duration(7*24) * time.Hour, }, }, }, }, { name: "double", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, }, behaviors: []behavior{ { name: "exact first without staleness policy", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, output: &output{ value: 0, }, }, { name: "exact first with staleness policy", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 0, }, }, { name: "exact second without staleness policy", input: input{ year: 1985, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, output: &output{ value: 1, }, }, { name: "exact second with staleness policy", input: input{ year: 1985, month: 3, day: 30, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 1, }, }, { name: "before first without staleness policy", input: input{ year: 1983, month: 9, day: 29, hour: 12, staleness: time.Duration(0), }, }, { name: "before first with staleness policy", input: input{ year: 1983, month: 9, day: 29, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "after second with staleness policy", input: input{ year: 1985, month: 9, day: 28, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 1, }, }, { name: "after second without staleness policy", input: input{ year: 1985, month: 9, day: 28, hour: 12, staleness: time.Duration(0), }, }, { name: "middle without staleness policy", input: input{ year: 1984, month: 9, day: 28, hour: 
12, staleness: time.Duration(0), }, }, { name: "middle with insufficient staleness policy", input: input{ year: 1984, month: 9, day: 28, hour: 12, staleness: time.Duration(364*24) * time.Hour, }, }, { name: "middle with sufficient staleness policy", input: input{ year: 1984, month: 9, day: 28, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 0.5, }, }, }, }, { name: "triple", values: []value{ { year: 1984, month: 3, day: 30, hour: 0, value: 0, }, { year: 1985, month: 3, day: 30, hour: 0, value: 1, }, { year: 1986, month: 3, day: 30, hour: 0, value: 2, }, }, behaviors: []behavior{ { name: "exact first without staleness policy", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, output: &output{ value: 0, }, }, { name: "exact first with staleness policy", input: input{ year: 1984, month: 3, day: 30, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 0, }, }, { name: "exact second without staleness policy", input: input{ year: 1985, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, output: &output{ value: 1, }, }, { name: "exact second with staleness policy", input: input{ year: 1985, month: 3, day: 30, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 1, }, }, { name: "exact third without staleness policy", input: input{ year: 1986, month: 3, day: 30, hour: 0, staleness: time.Duration(0), }, output: &output{ value: 2, }, }, { name: "exact third with staleness policy", input: input{ year: 1986, month: 3, day: 30, hour: 0, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 2, }, }, { name: "before first without staleness policy", input: input{ year: 1983, month: 9, day: 29, hour: 12, staleness: time.Duration(0), }, }, { name: "before first with staleness policy", input: input{ year: 1983, month: 9, day: 29, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, }, { name: "after third within staleness policy", input: input{ year: 1986, month: 9, day: 28, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 2, }, }, { name: "after third outside staleness policy", input: input{ year: 1986, month: 9, day: 28, hour: 12, staleness: time.Duration(1*24) * time.Hour, }, }, { name: "after third without staleness policy", input: input{ year: 1986, month: 9, day: 28, hour: 12, staleness: time.Duration(0), }, }, { name: "first middle without staleness policy", input: input{ year: 1984, month: 9, day: 28, hour: 12, staleness: time.Duration(0), }, }, { name: "first middle with insufficient staleness policy", input: input{ year: 1984, month: 9, day: 28, hour: 12, staleness: time.Duration(364*24) * time.Hour, }, }, { name: "first middle with sufficient staleness policy", input: input{ year: 1984, month: 9, day: 28, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 0.5, }, }, { name: "second middle without staleness policy", input: input{ year: 1985, month: 9, day: 28, hour: 12, staleness: time.Duration(0), }, }, { name: "second middle with insufficient staleness policy", input: input{ year: 1985, month: 9, day: 28, hour: 12, staleness: time.Duration(364*24) * time.Hour, }, }, { name: "second middle with sufficient staleness policy", input: input{ year: 1985, month: 9, day: 28, hour: 12, staleness: time.Duration(365*24) * time.Hour, }, output: &output{ value: 1.5, }, }, }, }, } for i, context := range contexts { // Wrapping in function to enable garbage collection of resources. 
		func() {
			p, closer := persistenceMaker()
			defer closer.Close()
			defer p.Close()

			m := model.Metric{
				model.MetricNameLabel: "age_in_years",
			}

			for _, value := range context.values {
				testAppendSample(p, model.Sample{
					Value:     model.SampleValue(value.value),
					Timestamp: time.Date(value.year, value.month, value.day, value.hour, 0, 0, 0, time.UTC),
					Metric:    m,
				}, t)
			}

			for j, behavior := range context.behaviors {
				input := behavior.input
				time := time.Date(input.year, input.month, input.day, input.hour, 0, 0, 0, time.UTC)
				sp := StalenessPolicy{
					DeltaAllowance: input.staleness,
				}

				actual, err := p.GetValueAtTime(model.NewFingerprintFromMetric(m), time, sp)
				if err != nil {
					t.Fatalf("%d.%d(%s). Could not query for value: %q\n", i, j, behavior.name, err)
				}

				if behavior.output == nil {
					if actual != nil {
						t.Fatalf("%d.%d(%s). Expected nil but got: %q\n", i, j, behavior.name, actual)
					}
				} else {
					if actual == nil {
						t.Fatalf("%d.%d(%s). Expected %s but got nil\n", i, j, behavior.name, behavior.output)
					} else {
						if actual.Value != behavior.output.value {
							t.Fatalf("%d.%d(%s). Expected %s but got %s\n", i, j, behavior.name, behavior.output, actual)
						}
					}
				}
			}
		}()
	}
}
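// Hedged sketch of how GetValueAtTimeTests might be driven from a concrete
// test. It assumes the file's existing imports plus "testing", that *testing.T
// satisfies test.Tester, and a placeholder constructor newTestPersistence that
// is NOT defined in this file; substitute whatever MetricPersistence
// implementation and closer the package under test actually provides.
func TestGetValueAtTime(t *testing.T) {
	persistenceMaker := func() (MetricPersistence, test.Closer) {
		// newTestPersistence stands in for a real constructor (e.g. an
		// in-memory or LevelDB-backed persistence plus a closer for its
		// temporary resources).
		return newTestPersistence(t)
	}

	GetValueAtTimeTests(persistenceMaker, t)
}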
func testMakeView(t test.Tester, flushToDisk bool) { type in struct { atTime []getValuesAtTimeOp atInterval []getValuesAtIntervalOp alongRange []getValuesAlongRangeOp } type out struct { atTime []metric.Values atInterval []metric.Values alongRange []metric.Values } m := clientmodel.Metric{clientmodel.MetricNameLabel: "request_count"} fingerprint := &clientmodel.Fingerprint{} fingerprint.LoadFromMetric(m) var ( instant = clientmodel.TimestampFromTime(time.Date(1984, 3, 30, 0, 0, 0, 0, time.Local)) scenarios = []struct { data clientmodel.Samples in in out out diskOnly bool }{ // No sample, but query asks for one. { in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{{}}, }, }, // Single sample, query asks for exact sample time. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Single sample, query time before the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant.Add(time.Second), }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second * 2), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second), Value: 0, }, }, }, }, }, // Single sample, query time after the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Two samples, query asks for first sample time. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Three samples, query asks for second sample time. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second), }, { Metric: m, Value: 2, Timestamp: instant.Add(time.Second * 2), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second), Value: 1, }, }, }, }, }, // Three samples, query asks for time between first and second samples. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second * 2), }, { Metric: m, Value: 2, Timestamp: instant.Add(time.Second * 4), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant, Value: 0, }, { Timestamp: instant.Add(time.Second * 2), Value: 1, }, }, }, }, }, // Three samples, query asks for time between second and third samples. 
{ data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, { Metric: m, Value: 1, Timestamp: instant.Add(time.Second * 2), }, { Metric: m, Value: 2, Timestamp: instant.Add(time.Second * 4), }, }, in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second * 3)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second * 2), Value: 1, }, { Timestamp: instant.Add(time.Second * 4), Value: 2, }, }, }, }, }, // Two chunks of samples, query asks for values from second chunk. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*4)*time.Second), 2*time.Second, m, ), in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2) + clientmodel.MinimumTick)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2)), Value: 200, }, { Timestamp: instant.Add(time.Second * (time.Duration(*leveldbChunkSize*2) + 2)), Value: 201, }, }, }, }, }, // Two chunks of samples, query asks for values between both chunks. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*4)*time.Second), 2*time.Second, m, ), in: in{ atTime: []getValuesAtTimeOp{ { baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2) - clientmodel.MinimumTick)}, }, }, }, out: out{ atTime: []metric.Values{ { { Timestamp: instant.Add(time.Second * (time.Duration(*leveldbChunkSize*2) - 2)), Value: 199, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2)), Value: 200, }, }, }, }, }, // Two chunks of samples, getValuesAtIntervalOp spanning both. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*6)*time.Second), 2*time.Second, m, ), in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-4) - clientmodel.MinimumTick)}, through: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2+4) + clientmodel.MinimumTick), }, interval: time.Second * 6, }, }, }, out: out{ atInterval: []metric.Values{ { { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2-6)), Value: 197, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2-4)), Value: 198, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2)), Value: 200, }, { Timestamp: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2+2)), Value: 201, }, }, }, }, }, // Three chunks of samples, getValuesAlongRangeOp spanning all of them. { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*6)*time.Second), 2*time.Second, m, ), in: in{ alongRange: []getValuesAlongRangeOp{ { baseOp: baseOp{current: instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-4) - clientmodel.MinimumTick)}, through: instant.Add(time.Second*time.Duration(*leveldbChunkSize*4+2) + clientmodel.MinimumTick), }, }, }, out: out{ alongRange: []metric.Values{buildValues( clientmodel.SampleValue(198), instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-4)), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4+2)+clientmodel.MinimumTick), 2*time.Second, )}, }, }, // Three chunks of samples and a getValuesAlongIntervalOp with an // interval larger than the natural sample interval, spanning the gap // between the second and third chunks. 
To test two consecutive // ExtractSamples() calls for the same op, we need three on-disk chunks, // because the first two chunks are loaded from disk together and passed // as one unit into ExtractSamples(). Especially, we want to test that // the first sample of the last chunk is included in the result. // // This is a regression test for an interval operator advancing too far // past the end of the currently available chunk, effectively skipping // over a value which is only available in the next chunk passed to // ExtractSamples(). // // Chunk and operator layout, assuming 200 samples per chunk: // // Chunk 1 Chunk 2 Chunk 3 // Values: 0......199 200......399 400......599 // Times: 0......398 400......798 800......1198 // | | // |_________ Operator _______| // 395 399 ...... 795 799 803 { data: buildSamples( instant, instant.Add(time.Duration(*leveldbChunkSize*6)*time.Second), 2*time.Second, m, ), in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant.Add(time.Second * time.Duration(*leveldbChunkSize*2-5))}, through: instant.Add(time.Second * time.Duration(*leveldbChunkSize*4+3)), }, interval: time.Second * 4, }, }, }, out: out{ atInterval: []metric.Values{ // We need two overlapping buildValues() calls here since the last // value of the second chunk is extracted twice (value 399, time // offset 798s). append( // Values 197...399. // Times 394...798. buildValues( clientmodel.SampleValue(197), instant.Add(time.Second*time.Duration(*leveldbChunkSize*2-6)), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4)), 2*time.Second, ), // Values 399...402. // Times 798...804. buildValues( clientmodel.SampleValue(399), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4-2)), instant.Add(time.Second*time.Duration(*leveldbChunkSize*4+6)), 2*time.Second, )..., ), }, }, // This example only works with on-disk chunks due to the repeatedly // extracted value at the end of the second chunk. diskOnly: true, }, // Single sample, getValuesAtIntervalOp starting after the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant, }, }, in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant.Add(time.Second)}, through: instant.Add(time.Second * 2), }, interval: time.Second, }, }, }, out: out{ atInterval: []metric.Values{ { { Timestamp: instant, Value: 0, }, }, }, }, }, // Single sample, getValuesAtIntervalOp starting before the sample. { data: clientmodel.Samples{ { Metric: m, Value: 0, Timestamp: instant.Add(time.Second), }, }, in: in{ atInterval: []getValuesAtIntervalOp{ { getValuesAlongRangeOp: getValuesAlongRangeOp{ baseOp: baseOp{current: instant}, through: instant.Add(time.Second * 2), }, interval: time.Second, }, }, }, out: out{ atInterval: []metric.Values{ { { Timestamp: instant.Add(time.Second), Value: 0, }, { Timestamp: instant.Add(time.Second), Value: 0, }, }, }, }, }, } ) for i, scenario := range scenarios { if scenario.diskOnly && !flushToDisk { continue } tiered, closer := NewTestTieredStorage(t) err := tiered.AppendSamples(scenario.data) if err != nil { t.Fatalf("%d. 
failed to add fixture data: %s", i, err) } if flushToDisk { tiered.Flush() } requestBuilder := tiered.NewViewRequestBuilder() for _, atTime := range scenario.in.atTime { requestBuilder.GetMetricAtTime(fingerprint, atTime.current) } for _, atInterval := range scenario.in.atInterval { requestBuilder.GetMetricAtInterval(fingerprint, atInterval.current, atInterval.through, atInterval.interval) } for _, alongRange := range scenario.in.alongRange { requestBuilder.GetMetricRange(fingerprint, alongRange.current, alongRange.through) } v, err := requestBuilder.Execute(time.Second*5, stats.NewTimerGroup()) if err != nil { t.Fatalf("%d. failed due to %s", i, err) } // To get all values in the View, ask for the 'forever' interval. interval := metric.Interval{OldestInclusive: math.MinInt64, NewestInclusive: math.MaxInt64} for j, atTime := range scenario.out.atTime { actual := v.GetRangeValues(fingerprint, interval) if len(actual) != len(atTime) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(atTime), len(actual)) } for k, value := range atTime { if value.Value != actual[k].Value { t.Errorf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Errorf("%d.%d.%d expected %s (offset %ss) timestamp, got %s (offset %ss)", i, j, k, value.Timestamp, value.Timestamp.Sub(instant), actual[k].Timestamp, actual[k].Timestamp.Sub(instant)) } } } for j, atInterval := range scenario.out.atInterval { actual := v.GetRangeValues(fingerprint, interval) if len(actual) != len(atInterval) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(atInterval), len(actual)) } for k, value := range atInterval { if value.Value != actual[k].Value { t.Errorf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Errorf("%d.%d.%d expected %s (offset %ds) timestamp, got %s (offset %ds, value %s)", i, j, k, value.Timestamp, int(value.Timestamp.Sub(instant)/time.Second), actual[k].Timestamp, int(actual[k].Timestamp.Sub(instant)/time.Second), actual[k].Value) } } } for j, alongRange := range scenario.out.alongRange { actual := v.GetRangeValues(fingerprint, interval) if len(actual) != len(alongRange) { t.Fatalf("%d.%d. expected %d output, got %d", i, j, len(alongRange), len(actual)) } for k, value := range alongRange { if value.Value != actual[k].Value { t.Fatalf("%d.%d.%d expected %v value, got %v", i, j, k, value.Value, actual[k].Value) } if !value.Timestamp.Equal(actual[k].Timestamp) { t.Fatalf("%d.%d.%d expected %s (offset %ss) timestamp, got %s (offset %ss)", i, j, k, value.Timestamp, value.Timestamp.Sub(instant), actual[k].Timestamp, actual[k].Timestamp.Sub(instant)) } } } closer.Close() } }
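// Hedged sketch: wiring testMakeView into the standard testing package. These
// wrappers use only identifiers defined above (testMakeView) and assume that
// *testing.T satisfies test.Tester; the test names themselves are illustrative
// and not taken from the original source.
func TestMakeViewInMemory(t *testing.T) {
	// Exercise the view-building scenarios without flushing to disk.
	testMakeView(t, false)
}

func TestMakeViewFlushedToDisk(t *testing.T) {
	// Exercise the same scenarios after flushing, including the disk-only
	// regression cases guarded by scenario.diskOnly.
	testMakeView(t, true)
}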
func StochasticTests(persistenceMaker func() (metric.Persistence, test.Closer), t test.Tester) {
	stochastic := func(x int) (success bool) {
		p, closer := persistenceMaker()
		defer closer.Close()
		defer p.Close()

		seed := rand.NewSource(int64(x))
		random := rand.New(seed)

		numberOfMetrics := random.Intn(stochasticMaximumVariance) + 1
		numberOfSharedLabels := random.Intn(stochasticMaximumVariance)
		numberOfUnsharedLabels := random.Intn(stochasticMaximumVariance)
		numberOfSamples := random.Intn(stochasticMaximumVariance) + 2
		numberOfRangeScans := random.Intn(stochasticMaximumVariance)

		metricTimestamps := map[int]map[int64]bool{}
		metricEarliestSample := map[int]int64{}
		metricNewestSample := map[int]int64{}

		for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ {
			sample := &clientmodel.Sample{
				Metric: clientmodel.Metric{},
			}
			v := clientmodel.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex))
			sample.Metric[clientmodel.MetricNameLabel] = v

			for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ {
				l := clientmodel.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex))
				v := clientmodel.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex))

				sample.Metric[l] = v
			}

			for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ {
				l := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex))
				v := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex))

				sample.Metric[l] = v
			}

			timestamps := map[int64]bool{}
			metricTimestamps[metricIndex] = timestamps
			var newestSample int64 = math.MinInt64
			var oldestSample int64 = math.MaxInt64

			var nextTimestamp func() int64
			nextTimestamp = func() int64 {
				var candidate int64
				candidate = random.Int63n(math.MaxInt32 - 1)

				if _, has := timestamps[candidate]; has {
					// WART
					candidate = nextTimestamp()
				}

				timestamps[candidate] = true

				if candidate < oldestSample {
					oldestSample = candidate
				}
				if candidate > newestSample {
					newestSample = candidate
				}

				return candidate
			}

			// BUG(matt): Invariant of the in-memory database assumes this.
			sortedTimestamps := timeslice{}
			for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ {
				sortedTimestamps = append(sortedTimestamps, clientmodel.TimestampFromUnix(nextTimestamp()))
			}
			sort.Sort(sortedTimestamps)

			for sampleIndex := 0; sampleIndex < numberOfSamples; sampleIndex++ {
				sample.Timestamp = sortedTimestamps[sampleIndex]
				sample.Value = clientmodel.SampleValue(sampleIndex)

				err := p.AppendSamples(clientmodel.Samples{sample})
				if err != nil {
					t.Error(err)
					return
				}
			}

			metricEarliestSample[metricIndex] = oldestSample
			metricNewestSample[metricIndex] = newestSample

			for sharedLabelIndex := 0; sharedLabelIndex < numberOfSharedLabels; sharedLabelIndex++ {
				matchers := metric.LabelMatchers{{
					Type:  metric.Equal,
					Name:  clientmodel.LabelName(fmt.Sprintf("shared_label_%d", sharedLabelIndex)),
					Value: clientmodel.LabelValue(fmt.Sprintf("label_%d", sharedLabelIndex)),
				}}

				fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
				if err != nil {
					t.Error(err)
					return
				}
				if len(fingerprints) == 0 {
					t.Errorf("expected a non-zero number of fingerprints, got %d", len(fingerprints))
					return
				}
			}
		}

		for metricIndex := 0; metricIndex < numberOfMetrics; metricIndex++ {
			for unsharedLabelIndex := 0; unsharedLabelIndex < numberOfUnsharedLabels; unsharedLabelIndex++ {
				labelName := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, unsharedLabelIndex))
				labelValue := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", unsharedLabelIndex))
				matchers := metric.LabelMatchers{{
					Type:  metric.Equal,
					Name:  labelName,
					Value: labelValue,
				}}

				fingerprints, err := p.GetFingerprintsForLabelMatchers(matchers)
				if err != nil {
					t.Error(err)
					return
				}
				if len(fingerprints) != 1 {
					t.Errorf("expected fingerprint count of %d, got %d", 1, len(fingerprints))
					return
				}
			}

			m := clientmodel.Metric{}
			m[clientmodel.MetricNameLabel] = clientmodel.LabelValue(fmt.Sprintf("metric_index_%d", metricIndex))

			for i := 0; i < numberOfSharedLabels; i++ {
				l := clientmodel.LabelName(fmt.Sprintf("shared_label_%d", i))
				v := clientmodel.LabelValue(fmt.Sprintf("label_%d", i))

				m[l] = v
			}

			for i := 0; i < numberOfUnsharedLabels; i++ {
				l := clientmodel.LabelName(fmt.Sprintf("metric_index_%d_private_label_%d", metricIndex, i))
				v := clientmodel.LabelValue(fmt.Sprintf("private_label_%d", i))

				m[l] = v
			}

			for i := 0; i < numberOfRangeScans; i++ {
				timestamps := metricTimestamps[metricIndex]

				var first int64
				var second int64

				for {
					firstCandidate := random.Int63n(int64(len(timestamps)))
					secondCandidate := random.Int63n(int64(len(timestamps)))

					smallest := int64(-1)
					largest := int64(-1)

					if firstCandidate == secondCandidate {
						continue
					} else if firstCandidate > secondCandidate {
						largest = firstCandidate
						smallest = secondCandidate
					} else {
						largest = secondCandidate
						smallest = firstCandidate
					}

					j := int64(0)
					for i := range timestamps {
						if j == smallest {
							first = i
						} else if j == largest {
							second = i
							break
						}
						j++
					}

					break
				}

				begin := first
				end := second

				if second < first {
					begin, end = second, first
				}

				interval := metric.Interval{
					OldestInclusive: clientmodel.TimestampFromUnix(begin),
					NewestInclusive: clientmodel.TimestampFromUnix(end),
				}

				samples := metric.Values{}
				fp := &clientmodel.Fingerprint{}
				fp.LoadFromMetric(m)

				switch persistence := p.(type) {
				case metric.View:
					samples = persistence.GetRangeValues(fp, interval)
					if len(samples) < 2 {
						t.Fatalf("expected sample count of at least %d, got %d", 2, len(samples))
					}
				case *LevelDBPersistence:
					var err error
					samples, err = levelDBGetRangeValues(persistence, fp, interval)
					if err != nil {
						t.Fatal(err)
					}
					if len(samples) < 2 {
						t.Fatalf("expected sample count of at least %d, got %d", 2, len(samples))
					}
				default:
					t.Error("Unexpected type of metric.Persistence.")
				}
			}
		}

		return true
	}

	if err := quick.Check(stochastic, nil); err != nil {
		t.Error(err)
	}
}
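// Hedged sketch: StochasticTests already drives itself through testing/quick,
// so a concrete test only needs to supply a persistenceMaker. The wrapper name
// and newStochasticPersistence below are placeholders, not identifiers defined
// in this file; substitute the package's real constructor and closer.
func TestStochastic(t *testing.T) {
	StochasticTests(func() (metric.Persistence, test.Closer) {
		// Placeholder constructor: return a metric.Persistence backed by
		// temporary resources along with a closer that cleans them up.
		return newStochasticPersistence(t)
	}, t)
}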