func main() {
	flag.Parse()
	common.SetupLogger()

	config := common.LoadConfig()

	cassandraConfig := cassandra.CassandraMetricMetadataConfig{
		Hosts:    config.MetricMetadataConfig.Hosts,
		Keyspace: config.MetricMetadataConfig.Keyspace,
	}
	apiInstance := common.NewMetricMetadataAPI(cassandraConfig)

	ruleset, err := util.LoadRules(config.MetricMetadataConfig.ConversionRulesPath)
	if err != nil {
		// Abort startup if the conversion rules cannot be loaded.
		log.Fatalf("failed to load conversion rules: %s", err.Error())
	}
	graphite := util.RuleBasedGraphiteConverter{Ruleset: ruleset}
	config.Blueflood.GraphiteMetricConverter = &graphite

	blueflood := blueflood.NewBlueflood(config.Blueflood)

	optimizer := optimize.NewOptimizationConfiguration()
	optimizer.EnableMetricMetadataCaching = true

	startServer(config.UIConfig, query.ExecutionContext{
		MetricMetadataAPI:         apiInstance,
		TimeseriesStorageAPI:      blueflood,
		FetchLimit:                1000,
		SlotLimit:                 5000,
		Registry:                  registry.Default(),
		OptimizationConfiguration: optimizer,
	})
}
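// TestMovingAverage evaluates a transform.moving_average expression against a
// fake metadata API and backend, and checks the smoothed values within a small
// tolerance.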
func TestMovingAverage(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series", api.NewTagSet()})
	fakeBackend := movingAverageBackend{}

	timerange, err := api.NewTimerange(1200, 1500, 100)
	if err != nil {
		t.Fatalf(err.Error())
	}

	expression := &functionExpression{
		functionName: "transform.moving_average",
		groupBy:      []string{},
		arguments: []function.Expression{
			&metricFetchExpression{"series", api.TruePredicate},
			durationExpression{"300ms", 300 * time.Millisecond},
		},
	}

	backend := fakeBackend
	result, err := evaluateToSeriesList(expression, &function.EvaluationContext{
		MetricMetadataAPI:         fakeAPI,
		TimeseriesStorageAPI:      backend,
		Timerange:                 timerange,
		SampleMethod:              api.SampleMean,
		FetchLimit:                function.NewFetchCounter(1000),
		Registry:                  registry.Default(),
		Cancellable:               api.NewCancellable(),
		OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
	})
	if err != nil {
		t.Errorf(err.Error())
	}

	expected := []float64{4, 3, 11.0 / 3, 5}
	if len(result.Series) != 1 {
		t.Fatalf("expected exactly 1 returned series")
	}
	if len(result.Series[0].Values) != len(expected) {
		t.Fatalf("expected exactly %d values in returned series, but got %d", len(expected), len(result.Series[0].Values))
	}
	const eps = 1e-7
	for i := range expected {
		if math.Abs(result.Series[0].Values[i]-expected[i]) > eps {
			t.Fatalf("expected %+v but got %+v", expected, result.Series[0].Values)
		}
	}
}
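// This variant of main additionally installs a SIGUSR2 handler that dumps
// goroutine stacks and passes user-specifiable defaults through to the server.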
func main() {
	flag.Parse()
	common.SetupLogger()

	// Dump goroutine stacks to stdout when the process receives SIGUSR2.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGUSR2)
	go func() {
		for range sigs {
			pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
		}
	}()

	config := common.LoadConfig()

	cassandraConfig := cassandra.CassandraMetricMetadataConfig{
		Hosts:    config.MetricMetadataConfig.Hosts,
		Keyspace: config.MetricMetadataConfig.Keyspace,
	}
	apiInstance := common.NewMetricMetadataAPI(cassandraConfig)

	ruleset, err := util.LoadRules(config.MetricMetadataConfig.ConversionRulesPath)
	if err != nil {
		// Abort startup if the conversion rules cannot be loaded.
		log.Fatalf("failed to load conversion rules: %s", err.Error())
	}
	graphite := util.RuleBasedGraphiteConverter{Ruleset: ruleset}
	config.Blueflood.GraphiteMetricConverter = &graphite

	blueflood := blueflood.NewBlueflood(config.Blueflood)

	optimizer := optimize.NewOptimizationConfiguration()
	optimizer.EnableMetricMetadataCaching = true

	// Defaults for user-specifiable options.
	userConfig := api.UserSpecifiableConfig{
		IncludeRawData: false,
	}

	startServer(config.UIConfig, query.ExecutionContext{
		MetricMetadataAPI:         apiInstance,
		TimeseriesStorageAPI:      blueflood,
		FetchLimit:                1500,
		SlotLimit:                 5000,
		Registry:                  registry.Default(),
		OptimizationConfiguration: optimizer,
		UserSpecifiableConfig:     userConfig,
	})
}
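// TestCommand_Describe parses `describe` queries with various predicates and
// verifies the per-tag value breakdown returned by the fake metadata API.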
func TestCommand_Describe(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)

	for _, test := range []struct {
		query          string
		metricmetadata api.MetricMetadataAPI
		expected       map[string][]string
	}{
		{"describe series_0", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe`series_0`", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe series_0 where dc='west'", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe`series_0`where(dc='west')", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where dc='west' or env = 'production'", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeAPI, map[string][]string{}},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeAPI, map[string][]string{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "describe")

		fakeTimeseriesStorage := mocks.FakeTimeseriesStorageAPI{}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeTimeseriesStorage,
			MetricMetadataAPI:         test.metricmetadata,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
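// TestCommand_DescribeAll verifies `describe all`, with and without a match
// clause, against the fake metadata API.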
func TestCommand_DescribeAll(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_1", api.ParseTagSet("")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("")}, emptyGraphiteName)

	for _, test := range []struct {
		query          string
		metricmetadata api.MetricMetadataAPI
		expected       []api.MetricKey
	}{
		{"describe all", fakeAPI, []api.MetricKey{"series_0", "series_1", "series_2", "series_3"}},
		{"describe all match '_0'", fakeAPI, []api.MetricKey{"series_0"}},
		{"describe all match '_5'", fakeAPI, []api.MetricKey{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "describe all")

		fakeMulti := mocks.FakeTimeseriesStorageAPI{}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeMulti,
			MetricMetadataAPI:         test.metricmetadata,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
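// TestTag verifies the tag.drop and tag.set pipeline functions by checking the
// tag sets on the returned series.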
func TestTag(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeBackend := mocks.FakeTimeseriesStorageAPI{}
	tests := []struct {
		query    string
		expected api.SeriesList
	}{
		{
			query: "select series_2 | tag.drop('dc') from 0 to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{}},
					{Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{}},
				},
			},
		},
		{
			query: "select series_2 | tag.drop('none') from 0 to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{"dc": "west"}},
					{Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{"dc": "east"}},
				},
			},
		},
		{
			query: "select series_2 | tag.set('dc', 'north') from 0 to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{"dc": "north"}},
					{Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{"dc": "north"}},
				},
			},
		},
		{
			query: "select series_2 | tag.set('none', 'north') from 0 to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{Values: []float64{1, 2, 3, 4, 5}, TagSet: api.TagSet{"dc": "west", "none": "north"}},
					{Values: []float64{3, 0, 3, 6, 2}, TagSet: api.TagSet{"dc": "east", "none": "north"}},
				},
			},
		},
	}
	for _, test := range tests {
		command, err := Parse(test.query)
		if err != nil {
			t.Fatalf("Unexpected error while parsing")
			return
		}
		if command.Name() != "select" {
			t.Errorf("Expected select command but got %s", command.Name())
			continue
		}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeBackend,
			MetricMetadataAPI:         fakeAPI,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		if err != nil {
			t.Errorf("Unexpected error while executing: %s", err.Error())
			continue
		}
		seriesListList, ok := rawResult.([]api.SeriesList)
		if !ok || len(seriesListList) != 1 {
			t.Errorf("expected query `%s` to produce []value; got %+v :: %T", test.query, rawResult, rawResult)
			continue
		}
		list := seriesListList[0]
		a := assert.New(t)
		expectedSeries := test.expected.Series
		for i, series := range list.Series {
			a.EqFloatArray(series.Values, expectedSeries[i].Values, 1e-100)
			if !series.TagSet.Equals(expectedSeries[i].TagSet) {
				t.Errorf("expected tagset %+v but got %+v for series %d of query %s", expectedSeries[i].TagSet, series.TagSet, i, test.query)
			}
		}
	}
}
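// TestQuery checks that the Query string attached to each result series list
// matches the expected normalized form of the parsed select expression.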
func TestQuery(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeBackend := mocks.FakeTimeseriesStorageAPI{}
	tests := []struct {
		query    string
		expected string
	}{
		{query: "select series_1 from 0 to 0", expected: "series_1"},
		{query: "select series_1 + 17 from 0 to 0", expected: "(series_1 + 17)"},
		{query: "select series_1 + 2342.32 from 0 to 0", expected: "(series_1 + 2342.32)"},
		{query: "select series_1*17 from 0 to 0", expected: "(series_1 * 17)"},
		{query: "select aggregate.sum(series_1) from 0 to 0", expected: "aggregate.sum(series_1)"},
		{query: "select aggregate.sum(series_1 group by dc) from 0 to 0", expected: "aggregate.sum(series_1 group by dc)"},
		{query: "select aggregate.sum(series_1 group by dc,env) from 0 to 0", expected: "aggregate.sum(series_1 group by dc, env)"},
		{query: "select aggregate.sum(series_1 collapse by dc) from 0 to 0", expected: "aggregate.sum(series_1 collapse by dc)"},
		{query: "select aggregate.sum(series_1 collapse by dc,env) from 0 to 0", expected: "aggregate.sum(series_1 collapse by dc, env)"},
		{query: "select transform.alias(aggregate.sum(series_1 group by dc,env), 'hello') from 0 to 0", expected: "transform.alias(aggregate.sum(series_1 group by dc, env), \"hello\")"},
		{query: "select transform.moving_average(series_2, 2h) from 0 to 0", expected: "transform.moving_average(series_2, 2h)"},
		{query: "select filter.lowest_max(series_2, 6) from 0 to 0", expected: "filter.lowest_max(series_2, 6)"},
	}
	for _, test := range tests {
		command, err := Parse(test.query)
		if err != nil {
			t.Fatalf("Unexpected error while parsing")
			return
		}
		if command.Name() != "select" {
			t.Errorf("Expected select command but got %s", command.Name())
			continue
		}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeBackend,
			MetricMetadataAPI:         fakeAPI,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		if err != nil {
			t.Errorf("Unexpected error while executing: %s", err.Error())
			continue
		}
		seriesListList, ok := rawResult.([]api.SeriesList)
		if !ok || len(seriesListList) != 1 {
			t.Errorf("expected query `%s` to produce []value; got %+v :: %T", test.query, rawResult, rawResult)
			continue
		}
		actual := seriesListList[0].Query
		if actual != test.expected {
			t.Errorf("Expected `%s` but got `%s` for query `%s`", test.expected, actual, test.query)
			continue
		}
	}
}
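// TestCommand_Select runs a table of select queries (arithmetic, aggregation,
// timeshift, and filter functions) against fake backends, comparing the
// returned series, and then checks that the fetch limit is enforced.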
func TestCommand_Select(t *testing.T) {
	epsilon := 1e-10
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_1", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("dc=east")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("dc=east")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("dc=north")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_timeout", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	var fakeBackend mocks.FakeTimeseriesStorageAPI

	testTimerange, err := api.NewTimerange(0, 120, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
		return
	}
	earlyTimerange, err := api.NewTimerange(0, 60, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
	}
	lateTimerange, err := api.NewTimerange(60, 120, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
	}

	for _, test := range []struct {
		query       string
		expectError bool
		expected    api.SeriesList
	}{
		{"select does_not_exist from 0 to 120 resolution 30ms", true, api.SeriesList{}},
		{"select series_1 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{1, 2, 3, 4, 5}, api.ParseTagSet("dc=west")}},
			Timerange: testTimerange,
			Name:      "series_1",
		}},
		{"select series_timeout from 0 to 120 resolution 30ms", true, api.SeriesList{}},
		{"select series_1 + 1 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{2, 3, 4, 5, 6}, api.ParseTagSet("dc=west")}},
			Timerange: testTimerange,
			Name:      "",
		}},
		{"select series_1 * 2 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{2, 4, 6, 8, 10}, api.ParseTagSet("dc=west")}},
			Timerange: testTimerange,
			Name:      "",
		}},
		{"select aggregate.max(series_2) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{3, 2, 3, 6, 5}, api.NewTagSet()}},
			Timerange: testTimerange,
			Name:      "series_2",
		}},
		{"select (1 + series_2) | aggregate.max from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{4, 3, 4, 7, 6}, api.NewTagSet()}},
			Timerange: testTimerange,
			Name:      "series_2",
		}},
		{"select series_1 from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{1, 2, 3}, api.ParseTagSet("dc=west")}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,31ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{2, 3, 4}, api.ParseTagSet("dc=west")}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,62ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{3, 4, 5}, api.ParseTagSet("dc=west")}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,29ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{2, 3, 4}, api.ParseTagSet("dc=west")}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,-31ms) from 60 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{2, 3, 4}, api.ParseTagSet("dc=west")}},
			Timerange: lateTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,-29ms) from 60 to 120 resolution 30ms", false, api.SeriesList{
			Series:    []api.Timeseries{{[]float64{2, 3, 4}, api.ParseTagSet("dc=west")}},
			Timerange: lateTimerange,
			Name:      "series_1",
		}},
		{"select series_3 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(4, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(70, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
			},
		}},
		{"select series_3 | filter.recent_highest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
				{[]float64{5, 5, 5, 2, 2}, api.ParseTagSet("dc=east")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
				{[]float64{1, 1, 1, 4, 4}, api.ParseTagSet("dc=west")},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{[]float64{3, 3, 3, 3, 3}, api.ParseTagSet("dc=north")},
			},
		}},
		{"select series_1 from -1000d to now resolution 30s", true, api.SeriesList{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		expected := test.expected
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "select")
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeBackend,
			MetricMetadataAPI:         fakeAPI,
			FetchLimit:                1000,
			Timeout:                   100 * time.Millisecond,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		if err != nil {
			if !test.expectError {
				a.Errorf("Unexpected error while executing: %s", err.Error())
			}
		} else {
			casted := rawResult.([]function.Value)
			actual, _ := casted[0].ToSeriesList(api.Timerange{})
			a.EqInt(len(actual.Series), len(expected.Series))
			if len(actual.Series) == len(expected.Series) {
				for i := 0; i < len(expected.Series); i++ {
					a.Eq(actual.Series[i].TagSet, expected.Series[i].TagSet)
					actualLength := len(actual.Series[i].Values)
					expectedLength := len(expected.Series[i].Values)
					a.Eq(actualLength, expectedLength)
					if actualLength == expectedLength {
						for j := 0; j < actualLength; j++ {
							a.EqFloat(actual.Series[i].Values[j], expected.Series[i].Values[j], epsilon)
						}
					}
				}
			}
		}
	}

	// Test that the fetch limit is enforced.
	command, err := Parse("select series_1, series_2 from 0 to 120 resolution 30ms")
	if err != nil {
		t.Fatalf("Unexpected error while parsing")
		return
	}
	context := ExecutionContext{
		TimeseriesStorageAPI:      fakeBackend,
		MetricMetadataAPI:         fakeAPI,
		FetchLimit:                3,
		Timeout:                   0,
		OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
	}
	_, err = command.Execute(context)
	if err != nil {
		t.Fatalf("expected success with limit 3 but got err = %s", err.Error())
		return
	}
	context.FetchLimit = 2
	_, err = command.Execute(context)
	if err == nil {
		t.Fatalf("expected failure with limit = 2")
		return
	}
	command, err = Parse("select series_2 from 0 to 120 resolution 30ms")
	if err != nil {
		t.Fatalf("Unexpected error while parsing")
		return
	}
	_, err = command.Execute(context)
	if err != nil {
		t.Fatalf("expected success with limit = 2 but got %s", err.Error())
	}
}
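// TestProfilerIntegration executes queries through NewProfilingCommand and
// verifies how many times each profiled backend operation is recorded.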
func TestProfilerIntegration(t *testing.T) {
	myAPI := mocks.NewFakeMetricMetadataAPI()
	fakeTimeStorage := mocks.FakeTimeseriesStorageAPI{}

	emptyGraphiteName := util.GraphiteMetric("")
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=1,y=2")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=2,y=2")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=3,y=1")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"B", api.ParseTagSet("q=foo")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"B", api.ParseTagSet("q=bar")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=1")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=2")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=3")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=4")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=5")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=6")}, emptyGraphiteName)

	testCases := []struct {
		query    string
		expected map[string]int
	}{
		{
			query: "describe all",
			expected: map[string]int{
				"describe all.Execute": 1,
				"Mock GetAllMetrics":   1,
			},
		},
		{
			query: "select A from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 1,
				"Mock GetAllTags":              1,
				"Mock FetchSingleTimeseries":   3,
			},
		},
		{
			query: "select A+A from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 2,
				"Mock GetAllTags":              2,
				"Mock FetchSingleTimeseries":   6,
			},
		},
		{
			query: "select A+2 from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 1,
				"Mock GetAllTags":              1,
				"Mock FetchSingleTimeseries":   3,
			},
		},
		{
			query: "select A where y = '2' from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 1,
				"Mock GetAllTags":              1,
				"Mock FetchSingleTimeseries":   2,
			},
		},
		{
			query: "describe A",
			expected: map[string]int{
				"describe.Execute": 1,
				"Mock GetAllTags":  1,
			},
		},
		{
			query: "describe metrics where y='2'",
			expected: map[string]int{
				"describe metrics.Execute": 1,
				"Mock GetMetricsForTag":    1,
			},
		},
		{
			query: "describe all",
			expected: map[string]int{
				"describe all.Execute": 1,
				"Mock GetAllMetrics":   1,
			},
		},
	}
	for _, test := range testCases {
		cmd, err := Parse(test.query)
		if err != nil {
			t.Error(err.Error())
			continue
		}
		profilingCommand, profiler := NewProfilingCommand(cmd)
		_, err = profilingCommand.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeTimeStorage,
			MetricMetadataAPI:         myAPI,
			FetchLimit:                10000,
			Timeout:                   time.Second * 4,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		if err != nil {
			t.Fatal(err.Error())
		}
		list := profiler.All()
		counts := map[string]int{}
		for _, node := range list {
			counts[node.Name()]++
		}
		if len(test.expected) != len(counts) {
			t.Errorf("The number of calls doesn't match the expected amount.")
			t.Errorf("Expected %+v, but got %+v", test.expected, counts)
		}
		for name, count := range test.expected {
			if counts[name] != count {
				t.Errorf("Expected `%s` to have %d occurrences, but had %d\n", name, count, counts[name])
				t.Errorf("Expected: %+v\nBut got: %+v\n", test.expected, counts)
				break
			}
		}
	}
}