func TestMovingAverage(t *testing.T) { fakeAPI := mocks.NewFakeMetricMetadataAPI() fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series", api.NewTagSet()}) fakeBackend := movingAverageBackend{} timerange, err := api.NewTimerange(1200, 1500, 100) if err != nil { t.Fatalf(err.Error()) } expression := &functionExpression{ functionName: "transform.moving_average", groupBy: []string{}, arguments: []function.Expression{ &metricFetchExpression{"series", api.TruePredicate}, durationExpression{"300ms", 300 * time.Millisecond}, }, } backend := fakeBackend result, err := evaluateToSeriesList(expression, &function.EvaluationContext{ MetricMetadataAPI: fakeAPI, TimeseriesStorageAPI: backend, Timerange: timerange, SampleMethod: api.SampleMean, FetchLimit: function.NewFetchCounter(1000), Registry: registry.Default(), Cancellable: api.NewCancellable(), OptimizationConfiguration: optimize.NewOptimizationConfiguration(), }) if err != nil { t.Errorf(err.Error()) } expected := []float64{4, 3, 11.0 / 3, 5} if len(result.Series) != 1 { t.Fatalf("expected exactly 1 returned series") } if len(result.Series[0].Values) != len(expected) { t.Fatalf("expected exactly %d values in returned series, but got %d", len(expected), len(result.Series[0].Values)) } const eps = 1e-7 for i := range expected { if math.Abs(result.Series[0].Values[i]-expected[i]) > eps { t.Fatalf("expected %+v but got %+v", expected, result.Series[0].Values) } } }
// TestCommand_Describe exercises the "describe" command against a fake
// metadata API populated with four tag combinations of series_0, checking
// that various `where` predicates narrow the reported tag values correctly.
// Several queries deliberately omit whitespace or use backquoted identifiers
// to exercise the parser's tokenization.
func TestCommand_Describe(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)
	for _, test := range []struct {
		query          string
		metricmetadata api.MetricMetadataAPI
		// expected maps each tag key to the sorted list of values present
		// in the series matching the predicate.
		expected map[string][]string
	}{
		{"describe series_0", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe`series_0`", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe series_0 where dc='west'", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe`series_0`where(dc='west')", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where dc='west' or env = 'production'", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		// `and` binds tighter than `or`, so these two match dc=west only.
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		// Parenthesized `or` anded with an impossible predicate matches nothing.
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeAPI, map[string][]string{}},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeAPI, map[string][]string{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "describe")
		fakeTimeseriesStorage := mocks.FakeTimeseriesStorageAPI{}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeTimeseriesStorage,
			MetricMetadataAPI:         test.metricmetadata,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
func TestCommand_DescribeAll(t *testing.T) { fakeAPI := mocks.NewFakeMetricMetadataAPI() fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_1", api.ParseTagSet("")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("")}, emptyGraphiteName) for _, test := range []struct { query string metricmetadata api.MetricMetadataAPI expected []api.MetricKey }{ {"describe all", fakeAPI, []api.MetricKey{"series_0", "series_1", "series_2", "series_3"}}, {"describe all match '_0'", fakeAPI, []api.MetricKey{"series_0"}}, {"describe all match '_5'", fakeAPI, []api.MetricKey{}}, } { a := assert.New(t).Contextf("query=%s", test.query) command, err := Parse(test.query) if err != nil { a.Errorf("Unexpected error while parsing") continue } a.EqString(command.Name(), "describe all") fakeMulti := mocks.FakeTimeseriesStorageAPI{} rawResult, err := command.Execute(ExecutionContext{ TimeseriesStorageAPI: fakeMulti, MetricMetadataAPI: test.metricmetadata, FetchLimit: 1000, Timeout: 0, OptimizationConfiguration: optimize.NewOptimizationConfiguration(), }) a.CheckError(err) a.Eq(rawResult, test.expected) } }
func TestTag(t *testing.T) { fakeAPI := mocks.NewFakeMetricMetadataAPI() fakeBackend := mocks.FakeTimeseriesStorageAPI{} tests := []struct { query string expected api.SeriesList }{ { query: "select series_2 | tag.drop('dc') from 0 to 120", expected: api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{}, }, { Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{}, }, }, }, }, { query: "select series_2 | tag.drop('none') from 0 to 120", expected: api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{"dc": "west"}, }, { Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{"dc": "east"}, }, }, }, }, { query: "select series_2 | tag.set('dc', 'north') from 0 to 120", expected: api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{"dc": "north"}, }, { Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{"dc": "north"}, }, }, }, }, { query: "select series_2 | tag.set('none', 'north') from 0 to 120", expected: api.SeriesList{ Series: []api.Timeseries{ { Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{"dc": "west", "none": "north"}, }, { Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{"dc": "east", "none": "north"}, }, }, }, }, } for _, test := range tests { command, err := Parse(test.query) if err != nil { t.Fatalf("Unexpected error while parsing") return } if command.Name() != "select" { t.Errorf("Expected select command but got %s", command.Name()) continue } rawResult, err := command.Execute(ExecutionContext{ TimeseriesStorageAPI: fakeBackend, MetricMetadataAPI: fakeAPI, FetchLimit: 1000, Timeout: 0, OptimizationConfiguration: optimize.NewOptimizationConfiguration(), }) if err != nil { t.Errorf("Unexpected error while execution: %s", err.Error()) continue } seriesListList, ok := rawResult.([]api.SeriesList) if !ok || len(seriesListList) != 1 { t.Errorf("expected query `%s` to produce []value; got %+v :: %T", test.query, rawResult, 
rawResult) continue } list := seriesListList[0] if err != nil { t.Fatal(err) } a := assert.New(t) expectedSeries := test.expected.Series for i, series := range list.Series { a.EqFloatArray(series.Values, expectedSeries[i].Values, 1e-100) if !series.TagSet.Equals(expectedSeries[i].TagSet) { t.Errorf("expected tagset %+v but got %+v for series %d of query %s", expectedSeries[i].TagSet, series.TagSet, i, test.query) } } } }
func TestQuery(t *testing.T) { fakeAPI := mocks.NewFakeMetricMetadataAPI() fakeBackend := mocks.FakeTimeseriesStorageAPI{} tests := []struct { query string expected string }{ { query: "select series_1 from 0 to 0", expected: "series_1", }, { query: "select series_1 + 17 from 0 to 0", expected: "(series_1 + 17)", }, { query: "select series_1 + 2342.32 from 0 to 0", expected: "(series_1 + 2342.32)", }, { query: "select series_1*17 from 0 to 0", expected: "(series_1 * 17)", }, { query: "select aggregate.sum(series_1) from 0 to 0", expected: "aggregate.sum(series_1)", }, { query: "select aggregate.sum(series_1 group by dc) from 0 to 0", expected: "aggregate.sum(series_1 group by dc)", }, { query: "select aggregate.sum(series_1 group by dc,env) from 0 to 0", expected: "aggregate.sum(series_1 group by dc, env)", }, { query: "select aggregate.sum(series_1 collapse by dc) from 0 to 0", expected: "aggregate.sum(series_1 collapse by dc)", }, { query: "select aggregate.sum(series_1 collapse by dc,env) from 0 to 0", expected: "aggregate.sum(series_1 collapse by dc, env)", }, { query: "select transform.alias(aggregate.sum(series_1 group by dc,env), 'hello') from 0 to 0", expected: "transform.alias(aggregate.sum(series_1 group by dc, env), \"hello\")", }, { query: "select transform.moving_average(series_2, 2h) from 0 to 0", expected: "transform.moving_average(series_2, 2h)", }, { query: "select filter.lowest_max(series_2, 6) from 0 to 0", expected: "filter.lowest_max(series_2, 6)", }, } for _, test := range tests { command, err := Parse(test.query) if err != nil { t.Fatalf("Unexpected error while parsing") return } if command.Name() != "select" { t.Errorf("Expected select command but got %s", command.Name()) continue } rawResult, err := command.Execute(ExecutionContext{ TimeseriesStorageAPI: fakeBackend, MetricMetadataAPI: fakeAPI, FetchLimit: 1000, Timeout: 0, OptimizationConfiguration: optimize.NewOptimizationConfiguration(), }) if err != nil { t.Errorf("Unexpected error while 
execution: %s", err.Error()) continue } seriesListList, ok := rawResult.([]api.SeriesList) if !ok || len(seriesListList) != 1 { t.Errorf("expected query `%s` to produce []value; got %+v :: %T", test.query, rawResult, rawResult) continue } actual := seriesListList[0].Query if actual != test.expected { t.Errorf("Expected `%s` but got `%s` for query `%s`", test.expected, actual, test.query) continue } } }
func TestCommand_Select(t *testing.T) { epsilon := 1e-10 fakeAPI := mocks.NewFakeMetricMetadataAPI() fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_1", api.ParseTagSet("dc=west")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("dc=east")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("dc=west")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("dc=west")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("dc=east")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("dc=north")}, emptyGraphiteName) fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_timeout", api.ParseTagSet("dc=west")}, emptyGraphiteName) var fakeBackend mocks.FakeTimeseriesStorageAPI testTimerange, err := api.NewTimerange(0, 120, 30) if err != nil { t.Errorf("Invalid test timerange") return } earlyTimerange, err := api.NewTimerange(0, 60, 30) if err != nil { t.Errorf("Invalid test timerange") } lateTimerange, err := api.NewTimerange(60, 120, 30) if err != nil { t.Errorf("Invalid test timerange") } for _, test := range []struct { query string expectError bool expected api.SeriesList }{ {"select does_not_exist from 0 to 120 resolution 30ms", true, api.SeriesList{}}, {"select series_1 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{1, 2, 3, 4, 5}, api.ParseTagSet("dc=west"), }}, Timerange: testTimerange, Name: "series_1", }}, {"select series_timeout from 0 to 120 resolution 30ms", true, api.SeriesList{}}, {"select series_1 + 1 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4, 5, 6}, api.ParseTagSet("dc=west"), }}, Timerange: testTimerange, Name: "", }}, {"select series_1 * 2 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ 
[]float64{2, 4, 6, 8, 10}, api.ParseTagSet("dc=west"), }}, Timerange: testTimerange, Name: "", }}, {"select aggregate.max(series_2) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{3, 2, 3, 6, 5}, api.NewTagSet(), }}, Timerange: testTimerange, Name: "series_2", }}, {"select (1 + series_2) | aggregate.max from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{4, 3, 4, 7, 6}, api.NewTagSet(), }}, Timerange: testTimerange, Name: "series_2", }}, {"select series_1 from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{1, 2, 3}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,31ms) from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,62ms) from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{3, 4, 5}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,29ms) from 0 to 60 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: earlyTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,-31ms) from 60 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: lateTimerange, Name: "series_1", }}, {"select transform.timeshift(series_1,-29ms) from 60 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{{ []float64{2, 3, 4}, api.ParseTagSet("dc=west"), }}, Timerange: lateTimerange, Name: "series_1", }}, {"select series_3 from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, 
api.ParseTagSet("dc=west"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_highest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_highest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_highest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_lowest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_lowest_max(4, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_highest_max(70, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_lowest_max(2, 30ms) from 0 to 120 resolution 30ms", false, 
api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_lowest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_highest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_3 | filter.recent_highest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_highest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_lowest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, { []float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east"), }, }, }}, {"select series_3 | filter.recent_lowest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, { []float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west"), }, }, }}, {"select series_3 | filter.recent_lowest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{ Series: []api.Timeseries{ { []float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north"), }, }, }}, {"select series_1 from -1000d to now resolution 
30s", true, api.SeriesList{}}, } { a := assert.New(t).Contextf("query=%s", test.query) expected := test.expected command, err := Parse(test.query) if err != nil { a.Errorf("Unexpected error while parsing") continue } a.EqString(command.Name(), "select") rawResult, err := command.Execute(ExecutionContext{ TimeseriesStorageAPI: fakeBackend, MetricMetadataAPI: fakeAPI, FetchLimit: 1000, Timeout: 100 * time.Millisecond, OptimizationConfiguration: optimize.NewOptimizationConfiguration(), }) if err != nil { if !test.expectError { a.Errorf("Unexpected error while executing: %s", err.Error()) } } else { casted := rawResult.([]function.Value) actual, _ := casted[0].ToSeriesList(api.Timerange{}) a.EqInt(len(actual.Series), len(expected.Series)) if len(actual.Series) == len(expected.Series) { for i := 0; i < len(expected.Series); i++ { a.Eq(actual.Series[i].TagSet, expected.Series[i].TagSet) actualLength := len(actual.Series[i].Values) expectedLength := len(actual.Series[i].Values) a.Eq(actualLength, expectedLength) if actualLength == expectedLength { for j := 0; j < actualLength; j++ { a.EqFloat(actual.Series[i].Values[j], expected.Series[i].Values[j], epsilon) } } } } } } // Test that the limit is correct command, err := Parse("select series_1, series_2 from 0 to 120 resolution 30ms") if err != nil { t.Fatalf("Unexpected error while parsing") return } context := ExecutionContext{ TimeseriesStorageAPI: fakeBackend, MetricMetadataAPI: fakeAPI, FetchLimit: 3, Timeout: 0, OptimizationConfiguration: optimize.NewOptimizationConfiguration(), } _, err = command.Execute(context) if err != nil { t.Fatalf("expected success with limit 3 but got err = %s", err.Error()) return } context.FetchLimit = 2 _, err = command.Execute(context) if err == nil { t.Fatalf("expected failure with limit = 2") return } command, err = Parse("select series2 from 0 to 120 resolution 30ms") if err != nil { t.Fatalf("Unexpected error while parsing") return } _, err = command.Execute(context) if err != nil { 
t.Fatalf("expected success with limit = 2 but got %s", err.Error()) } }
func TestProfilerIntegration(t *testing.T) { myAPI := mocks.NewFakeMetricMetadataAPI() fakeTimeStorage := mocks.FakeTimeseriesStorageAPI{} // myAPI := fakeAPI{ // tagSets: map[string][]api.TagSet{"A": []api.TagSet{ // {"x": "1", "y": "2"}, // {"x": "2", "y": "2"}, // {"x": "3", "y": "1"}, // }, // "B": []api.TagSet{ // {"q": "foo"}, // {"q": "bar"}, // }, // "C": []api.TagSet{ // {"c": "1"}, // {"c": "2"}, // {"c": "3"}, // {"c": "4"}, // {"c": "5"}, // {"c": "6"}, // }, // }, // } emptyGraphiteName := util.GraphiteMetric("") myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=1,y=2")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=2,y=2")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=3,y=1")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"B", api.ParseTagSet("q=foo")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"B", api.ParseTagSet("q=bar")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=1")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=2")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=3")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=4")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=5")}, emptyGraphiteName) myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=6")}, emptyGraphiteName) testCases := []struct { query string expected map[string]int }{ { query: "describe all", expected: map[string]int{ "describe all.Execute": 1, "Mock GetAllMetrics": 1, }, }, { query: "select A from 0 to 0", expected: map[string]int{ "select.Execute": 1, "Mock FetchMultipleTimeseries": 1, "Mock GetAllTags": 1, "Mock FetchSingleTimeseries": 3, }, }, { query: "select A+A from 0 to 0", expected: 
map[string]int{ "select.Execute": 1, "Mock FetchMultipleTimeseries": 2, "Mock GetAllTags": 2, "Mock FetchSingleTimeseries": 6, }, }, { query: "select A+2 from 0 to 0", expected: map[string]int{ "select.Execute": 1, "Mock FetchMultipleTimeseries": 1, "Mock GetAllTags": 1, "Mock FetchSingleTimeseries": 3, }, }, { query: "select A where y = '2' from 0 to 0", expected: map[string]int{ "select.Execute": 1, "Mock FetchMultipleTimeseries": 1, "Mock GetAllTags": 1, "Mock FetchSingleTimeseries": 2, }, }, { query: "describe A", expected: map[string]int{ "describe.Execute": 1, "Mock GetAllTags": 1, }, }, { query: "describe metrics where y='2'", expected: map[string]int{ "describe metrics.Execute": 1, "Mock GetMetricsForTag": 1, }, }, { query: "describe all", expected: map[string]int{ "describe all.Execute": 1, "Mock GetAllMetrics": 1, }, }, } for _, test := range testCases { cmd, err := Parse(test.query) if err != nil { t.Error(err.Error()) continue } profilingCommand, profiler := NewProfilingCommand(cmd) _, err = profilingCommand.Execute(ExecutionContext{ TimeseriesStorageAPI: fakeTimeStorage, MetricMetadataAPI: myAPI, FetchLimit: 10000, Timeout: time.Second * 4, OptimizationConfiguration: optimize.NewOptimizationConfiguration(), }) if err != nil { t.Fatal(err.Error()) } list := profiler.All() counts := map[string]int{} for _, node := range list { counts[node.Name()]++ } if len(test.expected) != len(counts) { t.Errorf("The number of calls doesn't match the expected amount.") t.Errorf("Expected %+v, but got %+v", test.expected, counts) } for name, count := range test.expected { if counts[name] != count { t.Errorf("Expected `%s` to have %d occurrences, but had %d\n", name, count, counts[name]) t.Errorf("Expected: %+v\nBut got: %+v\n", test.expected, counts) break } } } }
// Test_Blueflood exercises Blueflood.FetchSingleTimeseries against a fake
// HTTP client: one success case plus invalid-JSON, HTTP-error, and timeout
// failure modes, checking the returned series or error code respectively.
func Test_Blueflood(t *testing.T) {
	timerange, err := api.NewTimerange(12000, 13000, 1000)
	if err != nil {
		t.Fatalf("invalid testcase timerange")
		return
	}
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   0,
		GraphiteMetricConverter: &graphite,
	}
	// Not really MIN1440, but that's what default TTLs will get with the Timerange we use
	defaultQueryUrl := "https://blueflood.url/v2.0/square/views/some.key.graphite?from=12000&resolution=MIN1440&select=numPoints%2Caverage&to=14000"
	for _, test := range []struct {
		name               string
		metricMap          map[util.GraphiteMetric]api.TaggedMetric
		queryMetric        api.TaggedMetric
		sampleMethod       api.SampleMethod
		timerange          api.Timerange
		clientConfig       Config
		queryUrl           string
		queryResponse      string
		queryResponseCode  int
		queryDelay         time.Duration
		expectedErrorCode  api.TimeseriesStorageErrorCode
		expectedSeriesList api.Timeseries
	}{
		{
			name: "Success case",
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod: api.SampleMean,
			timerange:    timerange,
			queryUrl:     defaultQueryUrl,
			clientConfig: defaultClientConfig,
			queryResponse: `{
        "unit": "unknown", 
        "values": [
          {
            "numPoints": 1,
            "timestamp": 12000,
            "average": 5
          },
          {
            "numPoints": 1,
            "timestamp": 13000,
            "average": 3
          }
        ],
        "metadata": {
          "limit": null,
          "next_href": null,
          "count": 2,
          "marker": null
        }
      }`,
			expectedSeriesList: api.Timeseries{
				Values: []float64{5, 3},
				TagSet: api.ParseTagSet("tag=value"),
			},
		},
		{
			name: "Failure case - invalid JSON",
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{invalid}`,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - HTTP error",
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryResponseCode: 400,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - timeout",
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryDelay:        1 * time.Second, // exceeds the 1ms client Timeout
			expectedErrorCode: api.FetchTimeoutError,
		},
	} {
		a := assert.New(t).Contextf("%s", test.name)
		fakeApi := mocks.NewFakeMetricMetadataAPI()
		for k, v := range test.metricMap {
			fakeApi.AddPair(v, k, &graphite)
		}
		fakeHttpClient := mocks.NewFakeHttpClient()
		// A zero queryResponseCode in the table means "respond 200 OK".
		code := test.queryResponseCode
		if code == 0 {
			code = http.StatusOK
		}
		fakeHttpClient.SetResponse(test.queryUrl, mocks.Response{test.queryResponse, test.queryDelay, code})
		// Swap the real HTTP client for the fake before fetching.
		b := NewBlueflood(test.clientConfig).(*Blueflood)
		b.client = fakeHttpClient
		seriesList, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
			Metric:         test.queryMetric,
			SampleMethod:   test.sampleMethod,
			Timerange:      test.timerange,
			MetricMetadata: fakeApi,
			Cancellable:    api.NewCancellable(),
		})
		if test.expectedErrorCode != 0 {
			if err == nil {
				a.Errorf("Expected error, but was successful.")
				continue
			}
			berr, ok := err.(api.TimeseriesStorageError)
			if !ok {
				a.Errorf("Failed to cast error to TimeseriesStorageError")
				continue
			}
			a.Eq(berr.Code, test.expectedErrorCode)
		} else {
			if err != nil {
				a.CheckError(err)
				continue
			}
			a.Eq(seriesList, test.expectedSeriesList)
		}
	}
}
func TestFullResolutionDataFilling(t *testing.T) { graphite := mocks.FakeGraphiteConverter{ MetricMap: map[util.GraphiteMetric]api.TaggedMetric{ util.GraphiteMetric("some.key.value"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, } fakeApi := mocks.NewFakeMetricMetadataAPI() fakeApi.AddPair( api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, util.GraphiteMetric("some.key.value"), &graphite, ) now := time.Unix(1438734300000, 0) baseTime := now.Unix() * 1000 timeSource := func() time.Time { return now } queryTimerange, err := api.NewSnappedTimerange( int64(baseTime)-300*1000*10, // 50 minutes ago int64(baseTime)-300*1000*4, // 20 minutes ago 300*1000, // 5 minute resolution ) // The queries have to be relative to "now" defaultClientConfig := Config{ BaseUrl: "https://blueflood.url", TenantId: "square", Ttls: make(map[string]int64), Timeout: time.Millisecond, FullResolutionOverlap: 14400, GraphiteMetricConverter: &graphite, TimeSource: timeSource, } regularQueryURL := fmt.Sprintf( "https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d", queryTimerange.Start(), queryTimerange.End()+queryTimerange.ResolutionMillis(), ) regularResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 100 }, { "numPoints": 29, "timestamp": %d, "average": 142 }, { "numPoints": 27, "timestamp": %d, "average": 138 }, { "numPoints": 28, "timestamp": %d, "average": 182 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`, baseTime-300*1000*10, // 50 minutes ago baseTime-300*1000*9, // 45 minutes ago baseTime-300*1000*8, // 40 minutes ago baseTime-300*1000*7, // 35 minutes ago ) fullResolutionQueryURL := fmt.Sprintf( "https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d", queryTimerange.Start(), 
queryTimerange.End()+queryTimerange.ResolutionMillis(), ) fullResolutionResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 13 }, { "numPoints": 29, "timestamp": %d, "average": 16 }, { "numPoints": 27, "timestamp": %d, "average": 19 }, { "numPoints": 28, "timestamp": %d, "average": 27 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`, baseTime-300*1000*6, // 30m ago baseTime-300*1000*5+17, // 25m ago with random shuffling baseTime-300*1000*4+2821, // 20m ago with random shuffling baseTime-300*1000*3, // 15m ago ) fakeHttpClient := mocks.NewFakeHttpClient() fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK}) fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK}) defaultClientConfig.HttpClient = fakeHttpClient defaultClientConfig.TimeSource = timeSource b := NewBlueflood(defaultClientConfig) if err != nil { t.Fatalf("timerange error: %s", err.Error()) } seriesList, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{ Metric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, SampleMethod: api.SampleMean, Timerange: queryTimerange, MetricMetadata: fakeApi, Cancellable: api.NewCancellable(), }) if err != nil { t.Fatalf("Expected success, but got error: %s", err.Error()) } expected := []float64{100, 142, 138, 182, 13, 16, 19} if len(seriesList.Values) != len(expected) { t.Fatalf("Expected %+v but got %+v", expected, seriesList) } for i, expect := range expected { if seriesList.Values[i] != expect { t.Fatalf("Expected %+v but got %+v", expected, seriesList) } } }
func TestIncludeRawPayload(t *testing.T) { graphite := mocks.FakeGraphiteConverter{ MetricMap: map[util.GraphiteMetric]api.TaggedMetric{ util.GraphiteMetric("some.key.value"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, } fakeApi := mocks.NewFakeMetricMetadataAPI() fakeApi.AddPair( api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, util.GraphiteMetric("some.key.value"), &graphite, ) now := time.Unix(1438734300000, 0) baseTime := now.Unix() * 1000 timeSource := func() time.Time { return now } queryTimerange, err := api.NewSnappedTimerange( int64(baseTime)-300*1000*10, // 50 minutes ago int64(baseTime)-300*1000*4, // 20 minutes ago 300*1000, // 5 minute resolution ) // The queries have to be relative to "now" defaultClientConfig := Config{ BaseUrl: "https://blueflood.url", TenantId: "square", Ttls: make(map[string]int64), Timeout: time.Millisecond, FullResolutionOverlap: 14400, GraphiteMetricConverter: &graphite, TimeSource: timeSource, } regularQueryURL := fmt.Sprintf( "https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d", queryTimerange.Start(), queryTimerange.End()+queryTimerange.ResolutionMillis(), ) regularResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 100 }, { "numPoints": 29, "timestamp": %d, "average": 142 }, { "numPoints": 27, "timestamp": %d, "average": 138 }, { "numPoints": 28, "timestamp": %d, "average": 182 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`, baseTime-300*1000*10, // 50 minutes ago baseTime-300*1000*9, // 45 minutes ago baseTime-300*1000*8, // 40 minutes ago baseTime-300*1000*7, // 35 minutes ago ) fakeHttpClient := mocks.NewFakeHttpClient() fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK}) // fakeHttpClient.SetResponse(fullResolutionQueryURL, 
mocks.Response{fullResolutionResponse, 0, http.StatusOK}) defaultClientConfig.HttpClient = fakeHttpClient defaultClientConfig.TimeSource = timeSource b := NewBlueflood(defaultClientConfig) if err != nil { t.Fatalf("timerange error: %s", err.Error()) } userConfig := api.UserSpecifiableConfig{ IncludeRawData: true, } timeSeries, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{ Metric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, SampleMethod: api.SampleMean, Timerange: queryTimerange, MetricMetadata: fakeApi, Cancellable: api.NewCancellable(), UserSpecifiableConfig: userConfig, }) if err != nil { t.Fatalf("Expected success, but got error: %s", err.Error()) } if timeSeries.Raw == nil || string(timeSeries.Raw[0]) != regularResponse { t.Fatalf("Didn't fill in the raw result correctly, got: %s\n", string(timeSeries.Raw[0])) } }