func TestApplyNotes(t *testing.T) {
	var testTimerange, err = api.NewTimerange(758400000, 758400000+30000*5, 30000)
	if err != nil {
		t.Fatalf("invalid timerange used for testcase")
		return
	}
	// epsilon := 1e-10
	list := api.SeriesList{
		Series: []api.Timeseries{
			{
				Values: []float64{1, 2, 3, 2, 1, 2},
				TagSet: api.TagSet{
					"series": "C",
				},
			},
		},
		Timerange: testTimerange,
		Name:      "test",
	}
	testCases := []struct {
		transform transform
		parameter []function.Value
		expected  []string
	}{
		{
			transform: rate,
			parameter: []function.Value{},
			expected: []string{
				"Rate(map[series:C]): The underlying counter reset between 2.000000, 1.000000\n",
			},
		},
	}
	for _, test := range testCases {
		ctx := function.EvaluationContext{EvaluationNotes: []string{}}
		_, err := ApplyTransform(&ctx, list, test.transform, test.parameter)
		if err != nil {
			t.Error(err)
			continue
		}
		if len(test.expected) != len(ctx.EvaluationNotes) {
			t.Errorf("Expected there to be %d notes but there were %d of them", len(test.expected), len(ctx.EvaluationNotes))
		}
		for i, note := range test.expected {
			if i >= len(ctx.EvaluationNotes) {
				break
			}
			if ctx.EvaluationNotes[i] != note {
				t.Errorf("The context notes didn't include the evaluation message. Expected: %s Actually found: %s\n", note, ctx.EvaluationNotes[i])
			}
		}
	}
}
func TestSeriesFromMetricPoints(t *testing.T) {
	timerange, err := api.NewTimerange(4000, 4800, 100)
	if err != nil {
		t.Fatalf("testcase timerange is invalid")
		return
	}
	points := []metricPoint{
		{
			Timestamp: 4100,
			Average:   1,
		},
		{
			Timestamp: 4299, // Test flooring behavior
			Average:   2,
		},
		{
			Timestamp: 4403, // Test flooring behavior
			Average:   3,
		},
		{
			Timestamp: 4500,
			Average:   4,
		},
		{
			Timestamp: 4700,
			Average:   5,
		},
		{
			Timestamp: 4749,
			Average:   6,
		},
	}
	expected := [][]float64{{}, {1}, {2}, {}, {3}, {4}, {}, {5, 6}, {}}
	result := bucketsFromMetricPoints(points, func(point metricPoint) float64 { return point.Average }, timerange)
	if len(result) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, result)
		return
	}
	for i, expect := range expected {
		if len(result[i]) != len(expect) {
			t.Fatalf("Expected %+v but got %+v", expected, result)
			return
		}
		for j := range expect {
			if result[i][j] != expect[j] {
				t.Fatalf("Expected %+v but got %+v", expected, result)
				return
			}
		}
	}
}
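// The flooring behavior exercised above can be illustrated with a small,
// self-contained sketch. This is not the real bucketsFromMetricPoints
// implementation; it only assumes that a point lands in bucket
// floor((timestamp - start) / resolution), which is what the expected slices
// encode (e.g. timestamp 4299 with start 4000 and resolution 100 lands in
// bucket 2). The helper name is illustrative only.
func exampleBucketIndex(timestamp, start, resolution int64) int64 {
	// Integer division floors toward zero for the non-negative offsets used here.
	return (timestamp - start) / resolution
}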
func TestMovingAverage(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series", api.NewTagSet()})
	fakeBackend := movingAverageBackend{}
	timerange, err := api.NewTimerange(1200, 1500, 100)
	if err != nil {
		t.Fatalf(err.Error())
	}
	expression := &functionExpression{
		functionName: "transform.moving_average",
		groupBy:      []string{},
		arguments: []function.Expression{
			&metricFetchExpression{"series", api.TruePredicate},
			durationExpression{"300ms", 300 * time.Millisecond},
		},
	}
	backend := fakeBackend
	result, err := evaluateToSeriesList(expression, &function.EvaluationContext{
		MetricMetadataAPI:         fakeAPI,
		TimeseriesStorageAPI:      backend,
		Timerange:                 timerange,
		SampleMethod:              api.SampleMean,
		FetchLimit:                function.NewFetchCounter(1000),
		Registry:                  registry.Default(),
		Cancellable:               api.NewCancellable(),
		OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
	})
	if err != nil {
		t.Errorf(err.Error())
	}
	expected := []float64{4, 3, 11.0 / 3, 5}
	if len(result.Series) != 1 {
		t.Fatalf("expected exactly 1 returned series")
	}
	if len(result.Series[0].Values) != len(expected) {
		t.Fatalf("expected exactly %d values in returned series, but got %d", len(expected), len(result.Series[0].Values))
	}
	const eps = 1e-7
	for i := range expected {
		if math.Abs(result.Series[0].Values[i]-expected[i]) > eps {
			t.Fatalf("expected %+v but got %+v", expected, result.Series[0].Values)
		}
	}
}
func Test_ScalarExpression(t *testing.T) {
	timerangeA, err := api.NewTimerange(0, 10, 2)
	if err != nil {
		t.Fatalf("invalid timerange used for testcase")
		return
	}
	for _, test := range []struct {
		expr           scalarExpression
		timerange      api.Timerange
		expectedSeries []api.Timeseries
	}{
		{
			scalarExpression{5},
			timerangeA,
			[]api.Timeseries{
				api.Timeseries{
					Values: []float64{5.0, 5.0, 5.0, 5.0, 5.0, 5.0},
					TagSet: api.NewTagSet(),
				},
			},
		},
	} {
		a := assert.New(t).Contextf("%+v", test)
		result, err := evaluateToSeriesList(test.expr, &function.EvaluationContext{
			TimeseriesStorageAPI: FakeBackend{},
			Timerange:            test.timerange,
			SampleMethod:         api.SampleMean,
			FetchLimit:           function.NewFetchCounter(1000),
			Registry:             registry.Default(),
		})
		if err != nil {
			t.Fatalf("failed to convert number into serieslist")
		}
		a.EqInt(len(result.Series), len(test.expectedSeries))
		for i := 0; i < len(result.Series); i++ {
			a.Eq(result.Series[i].Values, test.expectedSeries[i].Values)
		}
	}
}
// Test that the transforms of the following work as expected: // - transform.derivative | transform.integral func TestTransformIdentity(t *testing.T) { //This is to make sure that the scale of all the data //is interpreted as 30 seconds (30000 milliseconds) timerange, _ := api.NewTimerange(0, int64(30000*5), int64(30000)) testCases := []struct { values []float64 timerange api.Timerange tests []struct { expected []float64 transforms []transform } }{ { values: []float64{0, 1, 2, 3, 4, 5}, timerange: timerange, tests: []struct { expected []float64 transforms []transform }{ { expected: []float64{0, 1, 2, 3, 4}, transforms: []transform{ derivative, Integral, }, }, { expected: []float64{0, 1, 2, 3, 4}, transforms: []transform{ rate, Integral, }, }, }, }, { values: []float64{12, 15, 20, 3, 18, 30}, timerange: timerange, tests: []struct { expected []float64 transforms []transform }{ { expected: []float64{0, 5, -12, 3, 15}, transforms: []transform{ derivative, Integral, }, }, { // While this is odd, think about it this way: // We saw 5 increments (15 - 20), then we saw thirty total increments // (3, 18, 30) over the rest of the time period expected: []float64{0, 5, 8, 23, 35}, transforms: []transform{ rate, Integral, }, }, }, }, } epsilon := 1e-10 var err error for _, test := range testCases { series := api.Timeseries{ Values: test.values, TagSet: api.TagSet{}, } for _, transform := range test.tests { result := series for _, fun := range transform.transforms { ctx := function.EvaluationContext{EvaluationNotes: []string{}} seriesList := api.SeriesList{ Series: []api.Timeseries{result}, Timerange: timerange, } params := []function.Value{} a, err := ApplyTransform(&ctx, seriesList, fun, params) result = a.Series[0] if err != nil { t.Error(err) break } } if err != nil { continue } if len(result.Values) != len(transform.expected) { t.Errorf("Expected result to have length %d but has length %d", len(transform.expected), len(result.Values)) continue } // Now check that the values are approximately equal for i := range result.Values { if math.Abs(result.Values[i]-transform.expected[i]) > epsilon { t.Errorf("Expected %+v but got %+v", transform.expected, result.Values) break } } } } }
func TestCommand_Select(t *testing.T) { epsilon := 1e-10 fakeApi := mocks.NewFakeApi() fakeApi.AddPair(api.TaggedMetric{"series_1", api.ParseTagSet("dc=west")}, emptyGraphiteName) fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("dc=east")}, emptyGraphiteName) fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("dc=west")}, emptyGraphiteName) fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=west")}, emptyGraphiteName) fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=east")}, emptyGraphiteName) fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=north")}, emptyGraphiteName) fakeApi.AddPair(api.TaggedMetric{"series_timeout", api.ParseTagSet("dc=west")}, emptyGraphiteName) var fakeBackend fakeApiBackend testTimerange, err := api.NewTimerange(0, 120, 30) if err != nil { t.Errorf("Invalid test timerange") return } earlyTimerange, err := api.NewTimerange(0, 60, 30) if err != nil { t.Errorf("Invalid test timerange") } lateTimerange, err := api.NewTimerange(60, 120, 30) if err != nil { t.Errorf("Invalid test timerange") } for _, test := range []struct { query string expectError bool expected api.SeriesList }{ {"select does_not_exist from 0 to 120 resolution 30ms", true, api.SeriesList{}}, {"select series_1 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{1, 2, 3, 4, 5}, api.ParseTagSet("dc=west"), }}, Timerange: testTimerange, Name: "series_1", }}, {"select series_timeout from 0 to 120 resolution 30ms", true, api.SeriesList{}}, {"select series_1 + 1 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4, 5, 6}, api.ParseTagSet("dc=west"), }}, Timerange: testTimerange, Name: "", }}, {"select series_1 * 2 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 4, 6, 8, 10}, api.ParseTagSet("dc=west"), }}, Timerange: testTimerange, Name: "", }}, {"select aggregate.max(series_2) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{3, 2, 3, 6, 5}, api.NewTagSet(), }}, Timerange: testTimerange, Name: "series_2", }}, {"select (1 + series_2) | aggregate.max from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{4, 3, 4, 7, 6}, api.NewTagSet(), }}, Timerange: testTimerange, Name: "series_2", }}, {"select series_1 from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{1, 2, 3}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,31ms) from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,62ms) from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{3, 4, 5}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,29ms) from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,-31ms) from 60 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: lateTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,-29ms) from 60 to 120 
resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: lateTimerange, Name: "series_1", }}, {"select series_3 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_highest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_highest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_highest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_lowest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_lowest_max(4, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_highest_max(70, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_lowest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_lowest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_highest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_highest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_highest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_lowest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, 
api.SeriesList{ Series: []api.Timeseries{ { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_lowest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_lowest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_1 from -1000d to now resolution 30s", true, api.SeriesList{}}, } { a := assert.New(t).Contextf("query=%s", test.query) expected := test.expected command, err := Parse(test.query) if err != nil { a.Errorf("Unexpected error while parsing") continue } a.EqString(command.Name(), "select") rawResult, err := command.Execute(ExecutionContext{ Backend: backend.NewSequentialMultiBackend(fakeBackend), API: fakeApi, FetchLimit: 1000, Timeout: 10 * time.Millisecond, }) if err != nil { if !test.expectError { a.Errorf("Unexpected error while executing: %s", err.Error()) } } else { casted := rawResult.([]function.Value) actual, _ := casted[0].ToSeriesList(api.Timerange{}) a.EqInt(len(actual.Series), len(expected.Series)) if len(actual.Series) == len(expected.Series) { for i := 0; i < len(expected.Series); i++ { a.Eq(actual.Series[i].TagSet, expected.Series[i].TagSet) actualLength := len(actual.Series[i].Values) expectedLength := len(expected.Series[i].Values) a.Eq(actualLength, expectedLength) if actualLength == expectedLength { for j := 0; j < actualLength; j++ { a.EqFloat(actual.Series[i].Values[j], expected.Series[i].Values[j], epsilon) } } } } } } // Test that the limit is correct command, err := Parse("select series_1, series_2 from 0 to 120 resolution 30ms") if err != nil { t.Fatalf("Unexpected error while parsing") return } context := ExecutionContext{Backend: backend.NewSequentialMultiBackend(fakeBackend), API: fakeApi, FetchLimit: 3, Timeout: 0} _, err = command.Execute(context) if err != nil { t.Fatalf("expected success with limit 3 but got err = %s", err.Error()) return } context.FetchLimit = 2 _, err = command.Execute(context) if err == nil { t.Fatalf("expected failure with limit = 2") return } command, err = Parse("select series_2 from 0 to 120 resolution 30ms") if err != nil { t.Fatalf("Unexpected error while parsing") return } _, err = command.Execute(context) if err != nil { t.Fatalf("expected success with limit = 2 but got %s", err.Error()) } }
func Test_AggregateBy(t *testing.T) { a := assert.New(t) timerange, err := api.NewTimerange(42, 270, 6) if err != nil { t.Fatalf("Timerange for test is invalid") return } var testList = api.SeriesList{ []api.Timeseries{ api.Timeseries{ Values: []float64{0, 1, 2}, TagSet: api.TagSet{ "env": "staging", "dc": "A", "host": "q77", }, }, api.Timeseries{ Values: []float64{4, 4, 4}, TagSet: api.TagSet{ "env": "staging", "dc": "B", "host": "r53", }, }, api.Timeseries{ Values: []float64{-1, -1, 2}, TagSet: api.TagSet{ "env": "production", "dc": "A", "host": "y1", }, }, api.Timeseries{ Values: []float64{0, 2, 0}, TagSet: api.TagSet{ "env": "production", "dc": "A", "host": "w20", }, }, api.Timeseries{ Values: []float64{2, 0, 0}, TagSet: api.TagSet{ "env": "production", "dc": "B", "host": "t8", }, }, api.Timeseries{ Values: []float64{0, 0, 1}, TagSet: api.TagSet{ "env": "production", "dc": "C", "host": "b38", }, }, api.Timeseries{ Values: []float64{math.NaN(), math.NaN(), math.NaN()}, TagSet: api.TagSet{ "env": "staging", "dc": "A", "host": "n44", }, }, api.Timeseries{ Values: []float64{math.NaN(), 10, math.NaN()}, TagSet: api.TagSet{ "env": "production", "dc": "B", "host": "n10", }, }, }, timerange, "Test.List", "", } var aggregatedTests = []struct { Tags []string Aggregator func([]float64) float64 Combines bool Results []api.Timeseries }{ { []string{"env"}, Sum, false, []api.Timeseries{ api.Timeseries{ Values: []float64{1, 11, 3}, TagSet: map[string]string{ "env": "production", }, }, api.Timeseries{ Values: []float64{4, 5, 6}, TagSet: map[string]string{ "env": "staging", }, }, }, }, { []string{"dc"}, Max, false, []api.Timeseries{ api.Timeseries{ Values: []float64{0, 2, 2}, TagSet: map[string]string{ "dc": "A", }, }, api.Timeseries{ Values: []float64{4, 10, 4}, TagSet: map[string]string{ "dc": "B", }, }, api.Timeseries{ Values: []float64{0, 0, 1}, TagSet: map[string]string{ "dc": "C", }, }, }, }, { []string{"dc", "env"}, Mean, false, []api.Timeseries{ api.Timeseries{ Values: []float64{0, 1, 2}, TagSet: map[string]string{ "dc": "A", "env": "staging", }, }, api.Timeseries{ Values: []float64{-1.0 / 2.0, 1.0 / 2.0, 1.0}, TagSet: map[string]string{ "dc": "A", "env": "production", }, }, api.Timeseries{ Values: []float64{4, 4, 4}, TagSet: map[string]string{ "dc": "B", "env": "staging", }, }, api.Timeseries{ Values: []float64{2, 5, 0}, TagSet: map[string]string{ "dc": "B", "env": "production", }, }, api.Timeseries{ Values: []float64{0, 0, 1}, TagSet: map[string]string{ "dc": "C", "env": "production", }, }, }, }, { []string{}, Sum, false, []api.Timeseries{ api.Timeseries{ Values: []float64{5, 16, 9}, TagSet: map[string]string{}, }, }, }, { []string{}, Total, false, []api.Timeseries{ { Values: []float64{8, 8, 8}, TagSet: map[string]string{}, }, }, }, { []string{"dc"}, Total, false, []api.Timeseries{ { Values: []float64{4, 4, 4}, TagSet: map[string]string{"dc": "A"}, }, { Values: []float64{3, 3, 3}, TagSet: map[string]string{"dc": "B"}, }, { Values: []float64{1, 1, 1}, TagSet: map[string]string{"dc": "C"}, }, }, }, { []string{}, Count, false, []api.Timeseries{ { Values: []float64{6, 7, 6}, TagSet: map[string]string{}, }, }, }, { []string{"dc"}, Count, false, []api.Timeseries{ { Values: []float64{3, 3, 3}, TagSet: map[string]string{"dc": "A"}, }, { Values: []float64{2, 3, 2}, TagSet: map[string]string{"dc": "B"}, }, { Values: []float64{1, 1, 1}, TagSet: map[string]string{"dc": "C"}, }, }, }, // Combine tests: { []string{"host"}, Sum, true, []api.Timeseries{ { Values: []float64{0, 1, 2}, TagSet: map[string]string{ 
"env": "staging", "dc": "A", }, }, { Values: []float64{4, 4, 4}, TagSet: map[string]string{ "env": "staging", "dc": "B", }, }, { Values: []float64{-1, 1, 2}, TagSet: map[string]string{ "env": "production", "dc": "A", }, }, { Values: []float64{2, 10, 0}, TagSet: map[string]string{ "env": "production", "dc": "B", }, }, { Values: []float64{0, 0, 1}, TagSet: map[string]string{ "env": "production", "dc": "C", }, }, }, }, { []string{"host", "dc", "env"}, Sum, true, []api.Timeseries{ { Values: []float64{5, 16, 9}, TagSet: map[string]string{}, }, }, }, { []string{"host"}, // This test verifies that aggregate.sum() on all NaN data returns NaN, not 0 Sum, false, []api.Timeseries{ api.Timeseries{ Values: []float64{0, 1, 2}, TagSet: api.TagSet{ "host": "q77", }, }, api.Timeseries{ Values: []float64{4, 4, 4}, TagSet: api.TagSet{ "host": "r53", }, }, api.Timeseries{ Values: []float64{-1, -1, 2}, TagSet: api.TagSet{ "host": "y1", }, }, api.Timeseries{ Values: []float64{0, 2, 0}, TagSet: api.TagSet{ "host": "w20", }, }, api.Timeseries{ Values: []float64{2, 0, 0}, TagSet: api.TagSet{ "host": "t8", }, }, api.Timeseries{ Values: []float64{0, 0, 1}, TagSet: api.TagSet{ "host": "b38", }, }, api.Timeseries{ Values: []float64{math.NaN(), math.NaN(), math.NaN()}, TagSet: api.TagSet{ "host": "n44", }, }, api.Timeseries{ Values: []float64{math.NaN(), 10, math.NaN()}, TagSet: api.TagSet{ "host": "n10", }, }, }, }, } for _, testCase := range aggregatedTests { aggregated := AggregateBy(testList, testCase.Aggregator, testCase.Tags, testCase.Combines) // Check that aggregated looks correct. // There should be two series if aggregated.Timerange != testList.Timerange { t.Errorf("Expected aggregate's Timerange to be %+v but is %+v", testList.Timerange, aggregated.Timerange) continue } if aggregated.Name != testList.Name { t.Errorf("Expected aggregate's Name to be %s but is %s", testList.Name, aggregated.Name) continue } if len(aggregated.Series) != len(testCase.Results) { t.Errorf("Expected %d series in aggregation result but found %d", len(testCase.Results), len(aggregated.Series)) continue } // Lastly, we have to check that the values are correct. // First, check that an aggregated series corresponding to each correct tagset: for _, series := range testCase.Results { found := false for _, aggregate := range aggregated.Series { if series.TagSet.Equals(aggregate.TagSet) { found = true break } } if !found { t.Fatalf("Expected to find series corresponding to %+v but could not in %+v", series.TagSet, aggregated) } } // Next, each series will do the reverse-lookup and check that its values match the expected results. // (It is neccesary to check both ways [see above] to ensure that the result doesn't contain just one of the series repeatedly) for _, aggregate := range aggregated.Series { // Any of the testCase results which it matches are candidates for _, correct := range testCase.Results { if aggregate.TagSet.Equals(correct.TagSet) { if len(aggregate.Values) != len(correct.Values) { t.Errorf("For tagset %+v, result %+v has a different length than expected %+v", correct.TagSet, aggregate.Values, correct.Values) continue } // Compare their values for i := range aggregate.Values { a = a.Contextf("for tagset %+v, result %+v did not match expected %+v", correct.TagSet, aggregate.Values, correct.Values) a.EqFloat(aggregate.Values[i], correct.Values[i], epsilon) } } } } } }
func TestFilter(t *testing.T) { a := assert.New(t) timerange, err := api.NewTimerange(1300, 1700, 100) if err != nil { t.Fatalf("invalid timerange used in testcase") } series := map[string]api.Timeseries{ "A": { Values: []float64{3, 3, 3, 3, 3}, TagSet: api.TagSet{ "name": "A", }, }, "B": { Values: []float64{1, 2, 2, 1, 0}, TagSet: api.TagSet{ "name": "B", }, }, "C": { Values: []float64{1, 2, 3, 4, 5.1}, TagSet: api.TagSet{ "name": "C", }, }, "D": { Values: []float64{4, 4, 3, 4, 3}, TagSet: api.TagSet{ "name": "D", }, }, } list := api.SeriesList{ Series: []api.Timeseries{series["A"], series["B"], series["C"], series["D"]}, Timerange: timerange, Name: "test_series", } tests := []struct { summary func([]float64) float64 lowest bool count int expect []string }{ { summary: aggregate.Sum, lowest: true, count: 6, expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Sum, lowest: false, count: 6, expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Sum, lowest: true, count: 4, expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Sum, lowest: true, count: 3, expect: []string{"A", "B", "C"}, }, { summary: aggregate.Sum, lowest: true, count: 2, expect: []string{"A", "B"}, }, { summary: aggregate.Sum, lowest: true, count: 1, expect: []string{"B"}, }, { summary: aggregate.Sum, lowest: false, count: 4, expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Sum, lowest: false, count: 3, expect: []string{"A", "C", "D"}, }, { summary: aggregate.Sum, lowest: false, count: 2, expect: []string{"C", "D"}, }, { summary: aggregate.Sum, lowest: false, count: 1, expect: []string{"D"}, }, { summary: aggregate.Max, lowest: false, count: 1, expect: []string{"C"}, }, { summary: aggregate.Max, lowest: false, count: 2, expect: []string{"C", "D"}, }, { summary: aggregate.Min, lowest: false, count: 2, expect: []string{"A", "D"}, }, { summary: aggregate.Min, lowest: false, count: 3, expect: []string{"A", "C", "D"}, }, } for _, test := range tests { filtered := FilterBy(list, test.count, test.summary, test.lowest) // Verify that every series in the result is from the original. // Also verify that we only get the ones we expect. if len(filtered.Series) != len(test.expect) { t.Errorf("Expected only %d in results but got %d", len(test.expect), len(filtered.Series)) continue } for _, s := range filtered.Series { original, ok := series[s.TagSet["name"]] if !ok { t.Fatalf("Result tagset called '%s' is not an original", s.TagSet["name"]) } a.EqFloatArray(original.Values, s.Values, 1e-7) } names := map[string]bool{} for _, name := range test.expect { names[name] = true } for _, s := range filtered.Series { if !names[s.TagSet["name"]] { t.Fatalf("TagSets %+v aren't expected; %+v are", filtered.Series, test.expect) } names[s.TagSet["name"]] = false // Use up the name so that a seocnd Series can't also use it. } } }
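// TestFilter above exercises FilterBy: score every series with the summary
// function, sort by that score, and keep `count` of them from the high or low
// end. The helper below is a minimal sketch of that scoring-and-selection idea
// with illustrative names; it is not the project's FilterBy, and it ignores
// any tie-breaking or NaN details the real code may have. It assumes the
// standard library sort package.
func exampleSelectBySummary(scores map[string]float64, count int, lowest bool) []string {
	names := []string{}
	for name := range scores {
		names = append(names, name)
	}
	sort.Slice(names, func(i, j int) bool {
		if lowest {
			return scores[names[i]] < scores[names[j]]
		}
		return scores[names[i]] > scores[names[j]]
	})
	if count > len(names) {
		count = len(names)
	}
	return names[:count]
}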
func Test_Blueflood(t *testing.T) { timerange, err := api.NewTimerange(12000, 13000, 1000) if err != nil { t.Fatalf("invalid testcase timerange") return } defaultClientConfig := Config{ "https://blueflood.url", "square", make(map[string]int64), time.Millisecond, 0, } // Not really MIN1440, but that's what default TTLs will get with the Timerange we use defaultQueryUrl := "https://blueflood.url/v2.0/square/views/some.key.graphite?from=12000&resolution=MIN1440&select=numPoints%2Caverage&to=14000" for _, test := range []struct { name string metricMap map[api.GraphiteMetric]api.TaggedMetric queryMetric api.TaggedMetric sampleMethod api.SampleMethod timerange api.Timerange clientConfig Config queryUrl string queryResponse string queryResponseCode int queryDelay time.Duration expectedErrorCode api.BackendErrorCode expectedSeriesList api.Timeseries }{ { name: "Success case", metricMap: map[api.GraphiteMetric]api.TaggedMetric{ api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, queryMetric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, sampleMethod: api.SampleMean, timerange: timerange, queryUrl: defaultQueryUrl, clientConfig: defaultClientConfig, queryResponse: `{ "unit": "unknown", "values": [ { "numPoints": 1, "timestamp": 12000, "average": 5 }, { "numPoints": 1, "timestamp": 13000, "average": 3 } ], "metadata": { "limit": null, "next_href": null, "count": 2, "marker": null } }`, expectedSeriesList: api.Timeseries{ Values: []float64{5, 3}, TagSet: api.ParseTagSet("tag=value"), }, }, { name: "Failure case - invalid JSON", metricMap: map[api.GraphiteMetric]api.TaggedMetric{ api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, queryMetric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, sampleMethod: api.SampleMean, timerange: timerange, clientConfig: defaultClientConfig, queryUrl: defaultQueryUrl, queryResponse: `{invalid}`, expectedErrorCode: api.FetchIOError, }, { name: "Failure case - HTTP error", metricMap: map[api.GraphiteMetric]api.TaggedMetric{ api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, queryMetric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, sampleMethod: api.SampleMean, timerange: timerange, clientConfig: defaultClientConfig, queryUrl: defaultQueryUrl, queryResponse: `{}`, queryResponseCode: 400, expectedErrorCode: api.FetchIOError, }, { name: "Failure case - timeout", metricMap: map[api.GraphiteMetric]api.TaggedMetric{ api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, queryMetric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, sampleMethod: api.SampleMean, timerange: timerange, clientConfig: defaultClientConfig, queryUrl: defaultQueryUrl, queryResponse: `{}`, queryDelay: 1 * time.Second, expectedErrorCode: api.FetchTimeoutError, }, } { a := assert.New(t).Contextf("%s", test.name) fakeApi := mocks.NewFakeApi() for k, v := range test.metricMap { fakeApi.AddPair(v, k) } fakeHttpClient := mocks.NewFakeHttpClient() code := test.queryResponseCode if code == 0 { code = http.StatusOK } fakeHttpClient.SetResponse(test.queryUrl, 
mocks.Response{test.queryResponse, test.queryDelay, code}) b := NewBlueflood(test.clientConfig).(*blueflood) b.client = fakeHttpClient seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{ Metric: test.queryMetric, SampleMethod: test.sampleMethod, Timerange: test.timerange, API: fakeApi, Cancellable: api.NewCancellable(), }) if test.expectedErrorCode != 0 { if err == nil { a.Errorf("Expected error, but was successful.") continue } berr, ok := err.(api.BackendError) if !ok { a.Errorf("Failed to cast error to BackendError") continue } a.Eq(berr.Code, test.expectedErrorCode) } else { if err != nil { a.CheckError(err) continue } a.Eq(seriesList, test.expectedSeriesList) } } }
func TestApplyTransformNaN(t *testing.T) { var testTimerange, err = api.NewTimerange(758400000, 758400000+30000*5, 30000) if err != nil { t.Fatalf("invalid timerange used for testcase") return } nan := math.NaN() list := api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{0, 1, nan, 3, 4, 5}, TagSet: api.TagSet{ "series": "A", }, }, { Values: []float64{2, nan, nan, nan, 3, 3}, TagSet: api.TagSet{ "series": "B", }, }, { Values: []float64{0, 1, 2, nan, 2, 1}, TagSet: api.TagSet{ "series": "C", }, }, }, Timerange: testTimerange, Name: "test", } tests := []struct { transform transform parameters []function.Value expected map[string][]float64 }{ { transform: Derivative, parameters: []function.Value{}, expected: map[string][]float64{ "A": {0, 1.0 / 30, nan, nan, 1.0 / 30, 1.0 / 30}, "B": {0, nan, nan, nan, nan, 0.0}, "C": {0, 1.0 / 30, 1.0 / 30, nan, nan, -1.0 / 30}, }, }, { transform: Integral, parameters: []function.Value{}, expected: map[string][]float64{ "A": {0, 1 * 30, 1 * 30, 4 * 30, 8 * 30, 13 * 30}, "B": {2 * 30, 2 * 30, 2 * 30, 2 * 30, 5 * 30, 8 * 30}, "C": {0, 1 * 30, 3 * 30, 3 * 30, 5 * 30, 6 * 30}, }, }, { transform: Rate, parameters: []function.Value{}, expected: map[string][]float64{ "A": {0, 1 / 30.0, nan, nan, 1 / 30.0, 1 / 30.0}, "B": {0, nan, nan, nan, nan, 0}, "C": {0, 1 / 30.0, 1 / 30.0, nan, nan, 0}, }, }, { transform: Cumulative, parameters: []function.Value{}, expected: map[string][]float64{ "A": {0, 1, 1, 4, 8, 13}, "B": {2, 2, 2, 2, 5, 8}, "C": {0, 1, 3, 3, 5, 6}, }, }, { transform: Default, parameters: []function.Value{function.ScalarValue(17)}, expected: map[string][]float64{ "A": {0, 1, 17, 3, 4, 5}, "B": {2, 17, 17, 17, 3, 3}, "C": {0, 1, 2, 17, 2, 1}, }, }, { transform: NaNKeepLast, parameters: []function.Value{}, expected: map[string][]float64{ "A": {0, 1, 1, 3, 4, 5}, "B": {2, 2, 2, 2, 3, 3}, "C": {0, 1, 2, 2, 2, 1}, }, }, } for _, test := range tests { result, err := ApplyTransform(list, test.transform, test.parameters) if err != nil { t.Fatalf(fmt.Sprintf("error applying transformation %s", err)) return } for _, series := range result.Series { values := series.Values expected := test.expected[series.TagSet["series"]] if len(values) != len(expected) { t.Errorf("values != expected; %+v != %+v", values, expected) continue } for i := range values { v := values[i] e := expected[i] if (math.IsNaN(e) != math.IsNaN(v)) || (!math.IsNaN(e) && math.Abs(v-e) > 1e-7) { t.Errorf("(actual) %+v != %+v (expected)", values, expected) break } } } } }
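// TestApplyTransformNaN expects NaNKeepLast to carry the most recent non-NaN
// value forward (so {2, NaN, NaN, NaN, 3, 3} becomes {2, 2, 2, 2, 3, 3}),
// while Default substitutes a fixed scalar. The sketch below shows only the
// carry-forward idea; it is not the library's NaNKeepLast transform, and the
// treatment of a leading NaN (left as NaN) is an assumption.
func exampleKeepLast(values []float64) []float64 {
	result := make([]float64, len(values))
	last := math.NaN() // nothing precedes the first point, so a leading NaN stays NaN
	for i, v := range values {
		if !math.IsNaN(v) {
			last = v
		}
		result[i] = last
	}
	return result
}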
func TestFilterRecent(t *testing.T) { timerange, err := api.NewTimerange(1300, 2000, 100) a := assert.New(t) a.CheckError(err) series := []api.Timeseries{ { Values: []float64{0, 1, 1, 0, 8, 8, 9, 8}, TagSet: api.TagSet{"name": "A"}, }, { Values: []float64{-5, -6, -3, -4, 5, 6, 7, 8}, TagSet: api.TagSet{"name": "B"}, }, { Values: []float64{7, 7, 6, 7, 3, 2, 1, 1}, TagSet: api.TagSet{"name": "C"}, }, { Values: []float64{6, 5, 5, 5, 2, 2, 3, 3}, TagSet: api.TagSet{"name": "D"}, }, } list := api.SeriesList{ Series: series, Timerange: timerange, } seriesMap := map[string]api.Timeseries{"A": series[0], "B": series[1], "C": series[2], "D": series[3]} tests := []struct { summary func([]float64) float64 lowest bool count int duration time.Duration expect []string }{ { summary: aggregate.Max, lowest: false, count: 50, duration: time.Millisecond * 450, // Four points expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Min, lowest: true, count: 5, duration: time.Millisecond * 450, // Four points expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Mean, lowest: false, count: 4, duration: time.Millisecond * 450, // Four points expect: []string{"A", "B", "C", "D"}, }, { summary: aggregate.Max, lowest: false, count: 2, duration: time.Millisecond * 450, // Four points expect: []string{"A", "B"}, }, { summary: aggregate.Max, lowest: true, count: 2, duration: time.Millisecond * 450, // Four points expect: []string{"C", "D"}, }, { summary: aggregate.Sum, lowest: true, count: 1, duration: time.Millisecond * 9000, // All points expect: []string{"B"}, }, { summary: aggregate.Sum, lowest: false, count: 1, duration: time.Millisecond * 9000, // All points expect: []string{"A"}, }, } for _, test := range tests { filtered := FilterRecentBy(list, test.count, test.summary, test.lowest, test.duration) // Verify that they're all unique and expected and unchanged a.EqInt(len(filtered.Series), len(test.expect)) // Next, verify that the names are the same. correct := map[string]bool{} for _, name := range test.expect { correct[name] = true } for _, series := range filtered.Series { name := series.TagSet["name"] if !correct[name] { t.Errorf("Expected %+v but got %+v", test.expect, filtered.Series) break } correct[name] = false // Delete it so that there can be no repeats. a.EqFloatArray(series.Values, seriesMap[name].Values, 1e-7) } } }
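// The durations in TestFilterRecent select how many trailing points feed the
// summary: at the 100ms resolution above, 450ms covers the last four points of
// each eight-point series, as the "// Four points" comments note. The helper
// below sketches that conversion under the assumption that the duration is
// floored to whole resolution steps; it is illustrative, not the project's
// FilterRecentBy logic.
func exampleRecentPointCount(duration, resolution time.Duration, total int) int {
	count := int(duration / resolution) // 450ms at 100ms resolution -> 4 trailing points
	if count > total {
		count = total
	}
	return count
}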
func TestApplyTransform(t *testing.T) { var testTimerange, err = api.NewTimerange(758400000, 758400000+30000*5, 30000) if err != nil { t.Fatalf("invalid timerange used for testcase") return } epsilon := 1e-10 list := api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{0, 1, 2, 3, 4, 5}, TagSet: api.TagSet{ "series": "A", }, }, { Values: []float64{2, 2, 1, 1, 3, 3}, TagSet: api.TagSet{ "series": "B", }, }, { Values: []float64{0, 1, 2, 3, 2, 1}, TagSet: api.TagSet{ "series": "C", }, }, }, Timerange: testTimerange, Name: "test", } testCases := []struct { transform transform parameter []function.Value expected map[string][]float64 }{ { transform: Derivative, parameter: []function.Value{}, expected: map[string][]float64{ "A": {0, 1.0 / 30, 1.0 / 30, 1.0 / 30, 1.0 / 30, 1.0 / 30}, "B": {0, 0, -1.0 / 30, 0, 2.0 / 30, 0}, "C": {0, 1.0 / 30, 1.0 / 30, 1.0 / 30, -1.0 / 30, -1.0 / 30}, }, }, { transform: Integral, parameter: []function.Value{}, expected: map[string][]float64{ "A": {0, 1 * 30, 3 * 30, 6 * 30, 10 * 30, 15 * 30}, "B": {2 * 30, 4 * 30, 5 * 30, 6 * 30, 9 * 30, 12 * 30}, "C": {0, 1 * 30, 3 * 30, 6 * 30, 8 * 30, 9 * 30}, }, }, { transform: Cumulative, parameter: []function.Value{}, expected: map[string][]float64{ "A": {0, 1, 3, 6, 10, 15}, "B": {2, 4, 5, 6, 9, 12}, "C": {0, 1, 3, 6, 8, 9}, }, }, } for _, test := range testCases { result, err := ApplyTransform(list, test.transform, test.parameter) if err != nil { t.Error(err) continue } alreadyUsed := make(map[string]bool) for _, series := range result.Series { name := series.TagSet["series"] expected, ok := test.expected[name] if !ok { t.Errorf("Series not present in testcase (A, B, or C). Is instead [%s]", name) continue } if alreadyUsed[name] { t.Errorf("Multiple series posing as %s", name) continue } alreadyUsed[name] = true // Lastly, compare the actual values if len(series.Values) != len(expected) { t.Errorf("Expected result to have %d entries but has %d entries; for series %s", len(expected), len(series.Values), name) continue } // Check that elements are within epsilon for i := range series.Values { if math.Abs(series.Values[i]-expected[i]) > epsilon { t.Errorf("Expected values for series %s to be %+v but are %+v", name, expected, series.Values) break } } } } }
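// The expected values in TestApplyTransform encode the scaling convention used
// throughout these tests: successive differences are divided by the resolution
// expressed in seconds (30000ms -> 30), and the integral multiplies the running
// sum by the same factor. The helper below is only a sketch of the derivative
// side of that convention, not the library's Derivative transform; fixing the
// first sample at 0 is an assumption read off the expected data.
func exampleDerivative(values []float64, resolutionSeconds float64) []float64 {
	result := make([]float64, len(values))
	for i := 1; i < len(values); i++ {
		result[i] = (values[i] - values[i-1]) / resolutionSeconds
	}
	return result
}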
func TestSet(t *testing.T) { timerange, err := api.NewTimerange(1300, 1600, 100) if err != nil { t.Fatal("invalid timerange used in testcase") } newValue := "east" list := api.SeriesList{ Timerange: timerange, Name: "ExampleTestSeries!", Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4}, TagSet: api.TagSet{ "name": "A", "host": "q12", }, }, { Values: []float64{6, 7, 3, 1}, TagSet: api.TagSet{ "name": "B", "host": "r2", }, }, { Values: []float64{2, 4, 6, 8}, TagSet: api.TagSet{ "name": "C", "host": "q12", "dc": "south", }, }, { Values: []float64{5, math.NaN(), 2, math.NaN()}, TagSet: api.TagSet{ "name": "D", "host": "q12", "dc": "south", }, }, }, } result := SetTag(list, "dc", newValue) expect := api.SeriesList{ Timerange: timerange, Name: "ExampleTestSeries!", Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4}, TagSet: api.TagSet{ "name": "A", "host": "q12", "dc": "east", }, }, { Values: []float64{6, 7, 3, 1}, TagSet: api.TagSet{ "name": "B", "host": "r2", "dc": "east", }, }, { Values: []float64{2, 4, 6, 8}, TagSet: api.TagSet{ "name": "C", "host": "q12", "dc": "east", }, }, { Values: []float64{5, math.NaN(), 2, math.NaN()}, TagSet: api.TagSet{ "name": "D", "host": "q12", "dc": "east", }, }, }, } // Verify that result == expect a := assert.New(t) a.EqString(result.Name, expect.Name) a.Eq(result.Timerange, expect.Timerange) a.EqInt(len(result.Series), len(expect.Series)) for i := range result.Series { // Verify that the two are equal seriesResult := result.Series[i] seriesExpect := expect.Series[i] a.EqFloatArray(seriesResult.Values, seriesExpect.Values, 1e-7) if !seriesResult.TagSet.Equals(seriesExpect.TagSet) { t.Errorf("Expected series %+v, but got %+v", seriesExpect, seriesResult) } } }
func TestApplyBound(t *testing.T) { a := assert.New(t) testTimerange, err := api.NewTimerange(758400000, 758400000+30000*5, 30000) //{2, nan, nan, nan, 3, 3}, if err != nil { t.Fatal("invalid timerange used for testcase") return } list := api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4, 5, 6}, TagSet: api.TagSet{ "name": "A", }, }, { Values: []float64{5, 5, 3, -7, math.NaN(), -20}, TagSet: api.TagSet{ "name": "B", }, }, { Values: []float64{math.NaN(), 100, 90, 0, 0, 3}, TagSet: api.TagSet{ "name": "C", }, }, }, Timerange: testTimerange, Name: "test", } tests := []struct { lower float64 upper float64 expectBound map[string][]float64 expectLower map[string][]float64 expectUpper map[string][]float64 }{ { lower: 2, upper: 5, expectBound: map[string][]float64{ "A": {2, 2, 3, 4, 5, 5}, "B": {5, 5, 3, 2, math.NaN(), 2}, "C": {math.NaN(), 5, 5, 2, 2, 3}, }, expectLower: map[string][]float64{ "A": {2, 2, 3, 4, 5, 6}, "B": {5, 5, 3, 2, math.NaN(), 2}, "C": {math.NaN(), 100, 90, 2, 2, 3}, }, expectUpper: map[string][]float64{ "A": {1, 2, 3, 4, 5, 5}, "B": {5, 5, 3, -7, math.NaN(), -20}, "C": {math.NaN(), 5, 5, 0, 0, 3}, }, }, { lower: -10, upper: 40, expectBound: map[string][]float64{ "A": {1, 2, 3, 4, 5, 6}, "B": {5, 5, 3, -7, math.NaN(), -10}, "C": {math.NaN(), 40, 40, 0, 0, 3}, }, expectLower: map[string][]float64{ "A": {1, 2, 3, 4, 5, 6}, "B": {5, 5, 3, -7, math.NaN(), -10}, "C": {math.NaN(), 100, 90, 0, 0, 3}, }, expectUpper: map[string][]float64{ "A": {1, 2, 3, 4, 5, 6}, "B": {5, 5, 3, -7, math.NaN(), -20}, "C": {math.NaN(), 40, 40, 0, 0, 3}, }, }, } for _, test := range tests { bounders := []struct { bounder func(ctx *function.EvaluationContext, series api.Timeseries, parameters []function.Value, scale float64) ([]float64, error) params []function.Value expected map[string][]float64 name string }{ {bounder: Bound, params: []function.Value{function.ScalarValue(test.lower), function.ScalarValue(test.upper)}, expected: test.expectBound, name: "bound"}, {bounder: LowerBound, params: []function.Value{function.ScalarValue(test.lower)}, expected: test.expectLower, name: "lower"}, {bounder: UpperBound, params: []function.Value{function.ScalarValue(test.upper)}, expected: test.expectUpper, name: "upper"}, } for _, bounder := range bounders { ctx := function.EvaluationContext{EvaluationNotes: []string{}} bounded, err := ApplyTransform(&ctx, list, bounder.bounder, bounder.params) if err != nil { t.Errorf(err.Error()) continue } if len(bounded.Series) != len(list.Series) { t.Errorf("Expected to get %d results but got %d in %+v", len(list.Series), len(bounded.Series), bounded) continue } // Next, check they're all unique and such alreadyUsed := map[string]bool{} for _, series := range bounded.Series { if alreadyUsed[series.TagSet["name"]] { t.Fatalf("Repeating name `%s`", series.TagSet["name"]) } alreadyUsed[series.TagSet["name"]] = true // Next, verify that it's what we expect a.EqFloatArray(series.Values, bounder.expected[series.TagSet["name"]], 3e-7) } } } ctx := function.EvaluationContext{EvaluationNotes: []string{}} if _, err = ApplyTransform(&ctx, list, Bound, []function.Value{function.ScalarValue(18), function.ScalarValue(17)}); err == nil { t.Fatalf("Expected error on invalid bounds") } if _, err = ApplyTransform(&ctx, list, Bound, []function.Value{function.ScalarValue(-17), function.ScalarValue(-18)}); err == nil { t.Fatalf("Expected error on invalid bounds") } }
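// TestApplyBound expects Bound to clamp each value into [lower, upper] while
// leaving NaNs untouched, with LowerBound and UpperBound applying only one of
// the two limits. The sketch below shows just that clamping rule; it is not
// the library's Bound transform, and it assumes the lower > upper error the
// test checks for is raised before any clamping happens.
func exampleClamp(value, lower, upper float64) float64 {
	if math.IsNaN(value) {
		return value // NaN passes through, as in the expected slices above
	}
	if value < lower {
		return lower
	}
	if value > upper {
		return upper
	}
	return value
}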
func TestTransformTimeseries(t *testing.T) { //This is to make sure that the scale of all the data //is interpreted as 30 seconds (30000 milliseconds) timerange, _ := api.NewTimerange(0, int64(30000*5), int64(30000)) testCases := []struct { series api.Timeseries values []float64 tagSet api.TagSet parameters []function.Value timerange api.Timerange tests []struct { fun transform expected []float64 useParam bool } }{ { values: []float64{0, 1, 2, 3, 4, 5}, tagSet: api.TagSet{ "dc": "A", "host": "B", "env": "C", }, timerange: timerange, parameters: []function.Value{function.ScalarValue(100)}, tests: []struct { fun transform expected []float64 useParam bool }{ { fun: derivative, expected: []float64{1.0 / 30.0, 1.0 / 30.0, 1.0 / 30.0, 1.0 / 30.0, 1.0 / 30.0}, useParam: false, }, { fun: Integral, expected: []float64{0.0, 1.0 * 30.0, 3.0 * 30.0, 6.0 * 30.0, 10.0 * 30.0, 15.0 * 30.0}, useParam: false, }, { fun: MapMaker(func(x float64) float64 { return -x }), expected: []float64{0, -1, -2, -3, -4, -5}, useParam: false, }, { fun: NaNKeepLast, expected: []float64{0, 1, 2, 3, 4, 5}, useParam: false, }, { fun: rate, expected: []float64{1.0 / 30.0, 1.0 / 30.0, 1.0 / 30.0, 1.0 / 30.0, 1.0 / 30.0}, useParam: false, }, }, }, } epsilon := 1e-10 for _, test := range testCases { series := api.Timeseries{ Values: test.values, TagSet: test.tagSet, } for _, transform := range test.tests { params := test.parameters if !transform.useParam { params = []function.Value{} } ctx := function.EvaluationContext{EvaluationNotes: []string{}} seriesList := api.SeriesList{ Series: []api.Timeseries{series}, Timerange: timerange, } a, err := ApplyTransform(&ctx, seriesList, transform.fun, params) result := a.Series[0] if err != nil { t.Error(err) continue } if !result.TagSet.Equals(test.tagSet) { t.Errorf("Expected tagset to be unchanged by transform, changed %+v into %+v", test.tagSet, result.TagSet) continue } if len(result.Values) != len(transform.expected) { t.Errorf("Expected result to have length %d but has length %d", len(transform.expected), len(result.Values)) continue } // Now check that the values are approximately equal for i := range result.Values { if math.Abs(result.Values[i]-transform.expected[i]) > epsilon { t.Errorf("Expected %+v but got %+v", transform.expected, result.Values) break } } } } }
	if err != nil {
		return nil, err
	}
	size, err := sizeValue.ToDuration()
	if err != nil {
		return nil, err
	}
	limit := int(float64(size/time.Millisecond)/float64(context.Timerange.Resolution()) + 0.5) // Limit is the number of items to include in the average
	if limit < 1 {
		// At least one value must be included at all times
		limit = 1
	}
	newContext := context
	timerange := context.Timerange
	newContext.Timerange, err = api.NewTimerange(timerange.Start()-int64(limit-1)*timerange.Resolution(), timerange.End(), timerange.Resolution())
	if err != nil {
		return nil, err
	}
	// The new context has a timerange which is extended beyond the query's.
	listValue, err := arguments[0].Evaluate(newContext)
	if err != nil {
		return nil, err
	}
	// This value must be a SeriesList.
	list, err := listValue.ToSeriesList(newContext.Timerange)
	if err != nil {
		return nil, err
	}
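// The fragment above widens the fetched timerange so the first requested point
// still has a full moving-average window of data behind it. As a worked
// example of the same arithmetic: size = 300ms at a 100ms resolution gives
// limit = round(300/100) = 3, so the start is pulled back by (3-1)*100ms =
// 200ms. The standalone helper below repeats that calculation with
// illustrative names; it is separate from the moving-average code itself.
func exampleExtendedStart(startMillis, resolutionMillis int64, size time.Duration) int64 {
	limit := int64(float64(size/time.Millisecond)/float64(resolutionMillis) + 0.5)
	if limit < 1 {
		limit = 1 // at least one point is always included
	}
	return startMillis - (limit-1)*resolutionMillis
}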