func newSuite() Suite {
	b := fakeBackend{make(chan struct{}, 10)}
	suite := Suite{
		backend:      b,
		multiBackend: NewParallelMultiBackend(b, 4),
		cancellable:  api.NewCancellable(),
	}
	suite.waitGroup.Add(1)
	return suite
}
// Execute performs the query represented by the given query string, and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) {
	timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return nil, err
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled.
	evaluationContext := function.EvaluationContext{
		API:          context.API,
		FetchLimit:   function.NewFetchCounter(context.FetchLimit),
		MultiBackend: context.Backend,
		Predicate:    cmd.predicate,
		SampleMethod: cmd.context.SampleMethod,
		Timerange:    timerange,
		Cancellable:  cancellable,
		Profiler:     context.Profiler,
		Registry:     r,
	}
	if hasTimeout {
		timeout := time.After(context.Timeout)
		results := make(chan interface{})
		errors := make(chan error)
		go func() {
			result, err := evaluateExpressions(evaluationContext, cmd.expressions)
			if err != nil {
				errors <- err
			} else {
				results <- result
			}
		}()
		select {
		case <-timeout:
			return nil, fmt.Errorf("Timeout while executing the query.")
		case result := <-results:
			return result, nil
		case err := <-errors:
			return nil, err
		}
	} else {
		return evaluateExpressions(evaluationContext, cmd.expressions)
	}
}
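// A note on the timeout branch above: results and errors are unbuffered, so when the
// select takes the timeout arm, the worker goroutine blocks forever on its send and
// leaks. A later version of Execute below buffers the channels for exactly this reason.
// A minimal standalone sketch of the buffered pattern (runWithTimeout and work are
// illustrative names, not part of this package):
package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout runs work in a goroutine and abandons it after d. The channels have
// capacity 1, so the goroutine's send always succeeds and it can exit even when the
// caller has already returned.
func runWithTimeout(d time.Duration, work func() (interface{}, error)) (interface{}, error) {
	results := make(chan interface{}, 1)
	errs := make(chan error, 1)
	go func() {
		result, err := work()
		if err != nil {
			errs <- err
			return
		}
		results <- result
	}()
	select {
	case <-time.After(d):
		return nil, errors.New("timeout while executing the query")
	case err := <-errs:
		return nil, err
	case result := <-results:
		return result, nil
	}
}

func main() {
	value, err := runWithTimeout(50*time.Millisecond, func() (interface{}, error) {
		time.Sleep(10 * time.Millisecond) // simulate a fast evaluation
		return 42, nil
	})
	fmt.Println(value, err) // 42 <nil>
}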
func TestMovingAverage(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series", api.NewTagSet()})

	fakeBackend := movingAverageBackend{}
	timerange, err := api.NewTimerange(1200, 1500, 100)
	if err != nil {
		t.Fatalf(err.Error())
	}

	expression := &functionExpression{
		functionName: "transform.moving_average",
		groupBy:      []string{},
		arguments: []function.Expression{
			&metricFetchExpression{"series", api.TruePredicate},
			durationExpression{"300ms", 300 * time.Millisecond},
		},
	}

	backend := fakeBackend
	result, err := evaluateToSeriesList(expression, &function.EvaluationContext{
		MetricMetadataAPI:         fakeAPI,
		TimeseriesStorageAPI:      backend,
		Timerange:                 timerange,
		SampleMethod:              api.SampleMean,
		FetchLimit:                function.NewFetchCounter(1000),
		Registry:                  registry.Default(),
		Cancellable:               api.NewCancellable(),
		OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
	})
	if err != nil {
		t.Errorf(err.Error())
	}

	expected := []float64{4, 3, 11.0 / 3, 5}
	if len(result.Series) != 1 {
		t.Fatalf("expected exactly 1 returned series")
	}
	if len(result.Series[0].Values) != len(expected) {
		t.Fatalf("expected exactly %d values in returned series, but got %d", len(expected), len(result.Series[0].Values))
	}
	const eps = 1e-7
	for i := range expected {
		if math.Abs(result.Series[0].Values[i]-expected[i]) > eps {
			t.Fatalf("expected %+v but got %+v", expected, result.Series[0].Values)
		}
	}
}
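// The expected values above are three-sample means: the window is 300ms at 100ms
// resolution, and 11.0/3 is the telltale average of three integers. The engine
// evidently widens the fetch so that even the first requested point has a full window
// behind it. A simplified standalone sketch of that windowed mean, assuming the input
// already carries the width-1 extra leading samples (a hypothetical helper, not the
// package's transform.moving_average):

// movingAverage returns len(values)-width+1 points, where out[i] is the mean of the
// full window values[i : i+width].
func movingAverage(values []float64, width int) []float64 {
	if width <= 0 || width > len(values) {
		return nil
	}
	out := make([]float64, 0, len(values)-width+1)
	for i := 0; i+width <= len(values); i++ {
		sum := 0.0
		for _, v := range values[i : i+width] {
			sum += v
		}
		out = append(out, sum/float64(width))
	}
	return out
}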
func Test_evaluateBinaryOperation(t *testing.T) {
	emptyContext := &function.EvaluationContext{
		TimeseriesStorageAPI: FakeBackend{},
		MetricMetadataAPI:    nil,
		Timerange:            api.Timerange{},
		SampleMethod:         api.SampleMean,
		Predicate:            nil,
		FetchLimit:           function.NewFetchCounter(1000),
		Cancellable:          api.NewCancellable(),
	}
	for _, test := range []struct {
		context              *function.EvaluationContext
		functionName         string
		left                 api.SeriesList
		right                api.SeriesList
		evalFunction         func(float64, float64) float64
		expectSuccess        bool
		expectedResultValues [][]float64
	}{
		{
			emptyContext,
			"add",
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{1, 2, 3},
						TagSet: api.TagSet{},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{4, 5, 1},
						TagSet: api.TagSet{},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			func(left, right float64) float64 { return left + right },
			true,
			[][]float64{{5, 7, 4}},
		},
		{
			emptyContext,
			"subtract",
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{1, 2, 3},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{4, 5, 1},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			func(left, right float64) float64 { return left - right },
			true,
			[][]float64{{-3, -3, 2}},
		},
		{
			emptyContext,
			"add",
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{1, 2, 3},
						TagSet: api.TagSet{
							"env":  "production",
							"host": "#1",
						},
					},
					{
						Values: []float64{7, 7, 7},
						TagSet: api.TagSet{
							"env":  "staging",
							"host": "#2",
						},
					},
					{
						Values: []float64{1, 0, 2},
						TagSet: api.TagSet{
							"env":  "staging",
							"host": "#3",
						},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{5, 5, 5},
						TagSet: api.TagSet{
							"env": "staging",
						},
					},
					{
						Values: []float64{10, 100, 1000},
						TagSet: api.TagSet{
							"env": "production",
						},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			func(left, right float64) float64 { return left + right },
			true,
			[][]float64{{11, 102, 1003}, {12, 12, 12}, {6, 5, 7}},
		},
		{
			emptyContext,
			"add",
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{1, 2, 3},
						TagSet: api.TagSet{
							"env":  "production",
							"host": "#1",
						},
					},
					{
						Values: []float64{4, 5, 6},
						TagSet: api.TagSet{
							"env":  "staging",
							"host": "#2",
						},
					},
					{
						Values: []float64{7, 8, 9},
						TagSet: api.TagSet{
							"env":  "staging",
							"host": "#3",
						},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{2, 2, 2},
						TagSet: api.TagSet{
							"env": "staging",
						},
					},
					{
						Values: []float64{3, 3, 3},
						TagSet: api.TagSet{
							"env": "staging",
						},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			func(left, right float64) float64 { return left * right },
			true,
			[][]float64{{8, 10, 12}, {14, 16, 18}, {12, 15, 18}, {21, 24, 27}},
		},
		{
			emptyContext,
			"add",
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{103, 103, 103},
						TagSet: api.TagSet{
							"env":  "production",
							"host": "#1",
						},
					},
					{
						Values: []float64{203, 203, 203},
						TagSet: api.TagSet{
							"env":  "staging",
							"host": "#2",
						},
					},
					{
						Values: []float64{303, 303, 303},
						TagSet: api.TagSet{
							"env":  "staging",
							"host": "#3",
						},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			api.SeriesList{
				[]api.Timeseries{
					{
						Values: []float64{1, 2, 3},
						TagSet: api.TagSet{
							"env": "staging",
						},
					},
					{
						Values: []float64{3, 0, 3},
						TagSet: api.TagSet{
							"env": "production",
						},
					},
				},
				api.Timerange{},
				"",
				"",
			},
			func(left, right float64) float64 { return left - right },
			true,
			[][]float64{{100, 103, 100}, {202, 201, 200}, {302, 301, 300}},
		},
	} {
		a := assert.New(t).Contextf("%+v", test)
		metricFun := registry.NewOperator(test.functionName, test.evalFunction)
		value, err := metricFun.Evaluate(test.context, []function.Expression{&LiteralSeriesExpression{test.left}, &LiteralSeriesExpression{test.right}}, []string{}, false)
		if err != nil {
			a.EqBool(err == nil, test.expectSuccess)
			continue
		}
		result, err := value.ToSeriesList(test.context.Timerange)
		if err != nil {
			a.EqBool(err == nil, test.expectSuccess)
			continue
		}
		// The expected list should be the same length as the actual one:
		a.EqInt(len(result.Series), len(test.expectedResultValues))
		// The expected results are only correct up to permutation, since predicting the
		// order in which series come out of `join()` is hard. Provided they're all unique,
		// it suffices to check that every expected member appears somewhere in the result:
		equal := func(left, right []float64) bool {
			if len(left) != len(right) {
				return false
			}
			for i := range left {
				if left[i] != right[i] {
					return false
				}
			}
			return true
		}
		for _, expectedMember := range test.expectedResultValues {
			found := false
			// Look for expectedMember inside result.Series:
			for _, resultMember := range result.Series {
				if equal(resultMember.Values, expectedMember) {
					found = true
					break
				}
			}
			if !found {
				t.Fatalf("got %+v for test %+v", result, test)
			}
		}
	}
}
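// The tagged cases above exercise the join that underlies every binary operator: a
// left and a right series are paired exactly when their tag sets agree on every key
// they share, so two staging right-hand series against two staging left-hand series
// produce four outputs (hence the permutation-insensitive comparison). A toy sketch
// of that pairing rule (a hypothetical helper, not the package's join):

// compatible reports whether two tag sets agree on every key they have in common.
func compatible(left, right api.TagSet) bool {
	for key, leftValue := range left {
		if rightValue, ok := right[key]; ok && rightValue != leftValue {
			return false
		}
	}
	return true
}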
// Execute performs the query represented by the given query string, and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) {
	timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return nil, err
	}
	slotLimit := context.SlotLimit
	defaultLimit := 1000
	if slotLimit == 0 {
		slotLimit = defaultLimit // the default limit
	}

	smallestResolution := timerange.Duration() / time.Duration(slotLimit-2)
	// ((end + res/2) - (start - res/2)) / res + 1 <= slots // make adjustments for a snap that moves the endpoints
	// (do some algebra)
	// (end - start + res) + res <= slots * res
	// end - start <= res * (slots - 2)
	// so
	// res >= (end - start) / (slots - 2)

	// Update the timerange by applying the insights of the storage API:
	chosenResolution := context.TimeseriesStorageAPI.ChooseResolution(timerange, smallestResolution)

	chosenTimerange, err := api.NewSnappedTimerange(timerange.Start(), timerange.End(), int64(chosenResolution/time.Millisecond))
	if err != nil {
		return nil, err
	}

	if chosenTimerange.Slots() > slotLimit {
		return nil, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			chosenTimerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled.
	evaluationContext := function.EvaluationContext{
		MetricMetadataAPI:         context.MetricMetadataAPI,
		FetchLimit:                function.NewFetchCounter(context.FetchLimit),
		TimeseriesStorageAPI:      context.TimeseriesStorageAPI,
		Predicate:                 cmd.predicate,
		SampleMethod:              cmd.context.SampleMethod,
		Timerange:                 timerange,
		Cancellable:               cancellable,
		Registry:                  r,
		Profiler:                  context.Profiler,
		OptimizationConfiguration: context.OptimizationConfiguration,
	}
	if hasTimeout {
		timeout := time.After(context.Timeout)
		results := make(chan interface{})
		errors := make(chan error)
		go func() {
			result, err := function.EvaluateMany(evaluationContext, cmd.expressions)
			if err != nil {
				errors <- err
			} else {
				results <- result
			}
		}()
		select {
		case <-timeout:
			return nil, function.NewLimitError("Timeout while executing the query.", context.Timeout, context.Timeout)
		case result := <-results:
			return result, nil
		case err := <-errors:
			return nil, err
		}
	} else {
		values, err := function.EvaluateMany(evaluationContext, cmd.expressions)
		if err != nil {
			return nil, err
		}
		lists := make([]api.SeriesList, len(values))
		for i := range values {
			lists[i], err = values[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return nil, err
			}
		}
		return lists, nil
	}
}
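// To make the res >= (end - start) / (slots - 2) bound concrete, consider a 6-hour
// query under the default 1000-slot limit. A standalone sketch (the numbers are
// illustrative, not from the package):
package main

import (
	"fmt"
	"time"
)

func main() {
	duration := 6 * time.Hour // end - start
	slotLimit := 1000
	smallest := duration / time.Duration(slotLimit-2)
	// Any resolution at least this coarse keeps the snapped range within the limit.
	fmt.Println(smallest) // 21.643286573s
}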
// Execute performs the query represented by the given query string, and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) {
	timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return nil, err
	}
	slotLimit := context.SlotLimit
	defaultLimit := 1000
	if slotLimit == 0 {
		slotLimit = defaultLimit // the default limit
	}
	if timerange.Slots() > slotLimit {
		return nil, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			timerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled.
	evaluationContext := function.EvaluationContext{
		API:          context.API,
		FetchLimit:   function.NewFetchCounter(context.FetchLimit),
		MultiBackend: context.Backend,
		Predicate:    cmd.predicate,
		SampleMethod: cmd.context.SampleMethod,
		Timerange:    timerange,
		Cancellable:  cancellable,
		Profiler:     context.Profiler,
		Registry:     r,
	}
	if hasTimeout {
		timeout := time.After(context.Timeout)
		results := make(chan interface{})
		errors := make(chan error)
		go func() {
			result, err := function.EvaluateMany(evaluationContext, cmd.expressions)
			if err != nil {
				errors <- err
			} else {
				results <- result
			}
		}()
		select {
		case <-timeout:
			return nil, function.NewLimitError("Timeout while executing the query.", context.Timeout, context.Timeout)
		case result := <-results:
			return result, nil
		case err := <-errors:
			return nil, err
		}
	} else {
		values, err := function.EvaluateMany(evaluationContext, cmd.expressions)
		if err != nil {
			return nil, err
		}
		lists := make([]api.SeriesList, len(values))
		for i := range values {
			lists[i], err = values[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return nil, err
			}
		}
		return lists, nil
	}
}
func Test_Blueflood(t *testing.T) {
	timerange, err := api.NewTimerange(12000, 13000, 1000)
	if err != nil {
		t.Fatalf("invalid testcase timerange")
		return
	}
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		0,
	}
	// Not really MIN1440, but that's what default TTLs will get with the Timerange we use.
	defaultQueryUrl := "https://blueflood.url/v2.0/square/views/some.key.graphite?from=12000&resolution=MIN1440&select=numPoints%2Caverage&to=14000"

	for _, test := range []struct {
		name               string
		metricMap          map[api.GraphiteMetric]api.TaggedMetric
		queryMetric        api.TaggedMetric
		sampleMethod       api.SampleMethod
		timerange          api.Timerange
		clientConfig       Config
		queryUrl           string
		queryResponse      string
		queryResponseCode  int
		queryDelay         time.Duration
		expectedErrorCode  api.BackendErrorCode
		expectedSeriesList api.Timeseries
	}{
		{
			name: "Success case",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): {
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod: api.SampleMean,
			timerange:    timerange,
			queryUrl:     defaultQueryUrl,
			clientConfig: defaultClientConfig,
			queryResponse: `{
				"unit": "unknown",
				"values": [
					{
						"numPoints": 1,
						"timestamp": 12000,
						"average": 5
					},
					{
						"numPoints": 1,
						"timestamp": 13000,
						"average": 3
					}
				],
				"metadata": {
					"limit": null,
					"next_href": null,
					"count": 2,
					"marker": null
				}
			}`,
			expectedSeriesList: api.Timeseries{
				Values: []float64{5, 3},
				TagSet: api.ParseTagSet("tag=value"),
			},
		},
		{
			name: "Failure case - invalid JSON",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): {
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{invalid}`,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - HTTP error",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): {
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryResponseCode: 400,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - timeout",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): {
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryDelay:        1 * time.Second,
			expectedErrorCode: api.FetchTimeoutError,
		},
	} {
		a := assert.New(t).Contextf("%s", test.name)

		fakeApi := mocks.NewFakeApi()
		for k, v := range test.metricMap {
			fakeApi.AddPair(v, k)
		}

		fakeHttpClient := mocks.NewFakeHttpClient()
		code := test.queryResponseCode
		if code == 0 {
			code = http.StatusOK
		}
		fakeHttpClient.SetResponse(test.queryUrl, mocks.Response{test.queryResponse, test.queryDelay, code})

		b := NewBlueflood(test.clientConfig).(*blueflood)
		b.client = fakeHttpClient

		seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
			Metric:       test.queryMetric,
			SampleMethod: test.sampleMethod,
			Timerange:    test.timerange,
			API:          fakeApi,
			Cancellable:  api.NewCancellable(),
		})

		if test.expectedErrorCode != 0 {
			if err == nil {
				a.Errorf("Expected error, but was successful.")
				continue
			}
			berr, ok := err.(api.BackendError)
			if !ok {
				a.Errorf("Failed to cast error to BackendError")
				continue
			}
			a.Eq(berr.Code, test.expectedErrorCode)
		} else {
			if err != nil {
				a.CheckError(err)
				continue
			}
			a.Eq(seriesList, test.expectedSeriesList)
		}
	}
}
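// Note the window in defaultQueryUrl: from is the timerange start (12000), but to is
// end + resolution (13000 + 1000 = 14000), which suggests Blueflood's to bound is
// exclusive and one extra slot must be requested. A sketch of how the test's expected
// URLs are assembled (buildViewsURL is a hypothetical helper, not part of the package):

// buildViewsURL reproduces the shape of the expected query URLs in these tests.
func buildViewsURL(base, tenant, metric, resolution string, startMs, endMs, resolutionMs int64) string {
	return fmt.Sprintf("%s/v2.0/%s/views/%s?from=%d&resolution=%s&select=numPoints%%2Caverage&to=%d",
		base, tenant, metric, startMs, resolution, endMs+resolutionMs)
}

// buildViewsURL("https://blueflood.url", "square", "some.key.graphite", "MIN1440", 12000, 13000, 1000)
// yields defaultQueryUrl above.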
func TestFullResolutionDataFilling(t *testing.T) {
	// The queries have to be relative to "now".
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		14400,
	}

	baseTime := 1438734300000

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)

	regularResponse := fmt.Sprintf(`{
		"unit": "unknown",
		"values": [
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 100
			},
			{
				"numPoints": 29,
				"timestamp": %d,
				"average": 142
			},
			{
				"numPoints": 27,
				"timestamp": %d,
				"average": 138
			},
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 182
			}
		],
		"metadata": {
			"limit": null,
			"next_href": null,
			"count": 4,
			"marker": null
		}
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)

	fullResolutionResponse := fmt.Sprintf(`{
		"unit": "unknown",
		"values": [
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 13
			},
			{
				"numPoints": 29,
				"timestamp": %d,
				"average": 16
			},
			{
				"numPoints": 27,
				"timestamp": %d,
				"average": 19
			},
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 27
			}
		],
		"metadata": {
			"limit": null,
			"next_href": null,
			"count": 4,
			"marker": null
		}
	}`,
		baseTime-300*1000*6,      // 30 minutes ago
		baseTime-300*1000*5+17,   // 25 minutes ago, with random jitter
		baseTime-300*1000*4+2821, // 20 minutes ago, with random jitter
		baseTime-300*1000*3,      // 15 minutes ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})

	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		api.GraphiteMetric("some.key.value"),
	)

	b := NewBlueflood(defaultClientConfig).(*blueflood)
	b.client = fakeHttpClient

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod: api.SampleMean,
		Timerange:    queryTimerange,
		API:          fakeApi,
		Cancellable:  api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}

	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}
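// The expected slice documents the fill behaviour: the query asks for 7 slots (50 to
// 20 minutes ago at 5-minute resolution), the MIN5 response supplies the first four
// (100, 142, 138, 182), and the FULL response supplies the rest (13, 16, 19). The
// jittered timestamps (+17ms, +2821ms) still snap into their slots, and the final
// FULL point (27, at 15 minutes ago) lies outside the requested range and is dropped.
// A hypothetical sketch of that slot assignment (not the package's merge code):

// fillSlot snaps a sample onto the requested grid, keeping any value already present.
func fillSlot(slots []float64, have []bool, startMs, resolutionMs, timestampMs int64, value float64) {
	i := (timestampMs - startMs) / resolutionMs // integer division snaps away the jitter
	if i < 0 || i >= int64(len(slots)) || have[i] {
		return
	}
	slots[i], have[i] = value, true
}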
// Execute performs the query represented by the given query string, and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (CommandResult, error) {
	userTimerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return CommandResult{}, err
	}
	slotLimit := context.SlotLimit
	defaultLimit := 1000
	if slotLimit == 0 {
		slotLimit = defaultLimit // the default limit
	}

	smallestResolution := userTimerange.Duration() / time.Duration(slotLimit-2)
	// ((end + res/2) - (start - res/2)) / res + 1 <= slots // make adjustments for a snap that moves the endpoints
	// (do some algebra)
	// (end - start + res) + res <= slots * res
	// end - start <= res * (slots - 2)
	// so
	// res >= (end - start) / (slots - 2)

	// Update the timerange by applying the insights of the storage API:
	chosenResolution := context.TimeseriesStorageAPI.ChooseResolution(userTimerange, smallestResolution)

	chosenTimerange, err := api.NewSnappedTimerange(userTimerange.Start(), userTimerange.End(), int64(chosenResolution/time.Millisecond))
	if err != nil {
		return CommandResult{}, err
	}

	if chosenTimerange.Slots() > slotLimit {
		return CommandResult{}, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			chosenTimerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled.
	evaluationContext := function.EvaluationContext{
		MetricMetadataAPI:         context.MetricMetadataAPI,
		FetchLimit:                function.NewFetchCounter(context.FetchLimit),
		TimeseriesStorageAPI:      context.TimeseriesStorageAPI,
		Predicate:                 cmd.predicate,
		SampleMethod:              cmd.context.SampleMethod,
		Timerange:                 chosenTimerange,
		Cancellable:               cancellable,
		Registry:                  r,
		Profiler:                  context.Profiler,
		OptimizationConfiguration: context.OptimizationConfiguration,
		EvaluationNotes:           []string{},
		UserSpecifiableConfig:     context.UserSpecifiableConfig,
	}

	timeout := (<-chan time.Time)(nil) // a nil channel blocks forever, so "no timeout" simply never fires
	if hasTimeout {
		timeout = time.After(context.Timeout)
	}

	// Give both channels capacity 1 so the goroutine's send always succeeds and it can exit
	// even after a timeout; a goroutine blocked on a send is never garbage collected.
	results := make(chan []function.Value, 1)
	errors := make(chan error, 1)
	go func() {
		// Evaluate the expressions and send the result (or the error) back to the select below.
		result, err := function.EvaluateMany(&evaluationContext, cmd.expressions)
		if err != nil {
			errors <- err
			return
		}
		results <- result
	}()

	select {
	case <-timeout:
		return CommandResult{}, function.NewLimitError("Timeout while executing the query.", context.Timeout, context.Timeout)
	case err := <-errors:
		return CommandResult{}, err
	case result := <-results:
		lists := make([]api.SeriesList, len(result))
		for i := range result {
			lists[i], err = result[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return CommandResult{}, err
			}
		}
		// Collect every tag value seen for each tag key, across all result series:
		description := map[string][]string{}
		for _, list := range lists {
			for _, series := range list.Series {
				for key, value := range series.TagSet {
					description[key] = append(description[key], value)
				}
			}
		}
		// Natural-sort each value list, then drop adjacent duplicates (sorting makes equal values adjacent):
		for key, values := range description {
			natural_sort.Sort(values)
			filtered := []string{}
			for i := range values {
				if i == 0 || values[i-1] != values[i] {
					filtered = append(filtered, values[i])
				}
			}
			description[key] = filtered
		}
		return CommandResult{
			Body: lists,
			Metadata: map[string]interface{}{
				"description": description,
				"notes":       evaluationContext.EvaluationNotes,
			},
		}, nil
	}
}
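// The description metadata above relies on natural_sort to make duplicates adjacent:
// after sorting, one pass comparing each value to its predecessor removes them all,
// and "natural" order keeps host10 after host2 rather than between host1 and host2.
// The same loop, pulled out as a helper (a sketch reusing the natural_sort package
// imported above):

// dedupSorted natural-sorts values and removes duplicates in a single pass.
func dedupSorted(values []string) []string {
	natural_sort.Sort(values) // e.g. [host2 host10 host2 host1] -> [host1 host2 host2 host10]
	filtered := []string{}
	for i := range values {
		if i == 0 || values[i-1] != values[i] {
			filtered = append(filtered, values[i]) // -> [host1 host2 host10]
		}
	}
	return filtered
}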
func TestFullResolutionDataFilling(t *testing.T) {
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.value"): {
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}

	fakeApi := mocks.NewFakeMetricMetadataAPI()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		util.GraphiteMetric("some.key.value"),
		&graphite,
	)

	now := time.Unix(1438734300000, 0)
	baseTime := now.Unix() * 1000
	timeSource := func() time.Time { return now }

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	// The queries have to be relative to "now".
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   14400,
		GraphiteMetricConverter: &graphite,
		TimeSource:              timeSource,
	}

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	regularResponse := fmt.Sprintf(`{
		"unit": "unknown",
		"values": [
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 100
			},
			{
				"numPoints": 29,
				"timestamp": %d,
				"average": 142
			},
			{
				"numPoints": 27,
				"timestamp": %d,
				"average": 138
			},
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 182
			}
		],
		"metadata": {
			"limit": null,
			"next_href": null,
			"count": 4,
			"marker": null
		}
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	fullResolutionResponse := fmt.Sprintf(`{
		"unit": "unknown",
		"values": [
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 13
			},
			{
				"numPoints": 29,
				"timestamp": %d,
				"average": 16
			},
			{
				"numPoints": 27,
				"timestamp": %d,
				"average": 19
			},
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 27
			}
		],
		"metadata": {
			"limit": null,
			"next_href": null,
			"count": 4,
			"marker": null
		}
	}`,
		baseTime-300*1000*6,      // 30 minutes ago
		baseTime-300*1000*5+17,   // 25 minutes ago, with random jitter
		baseTime-300*1000*4+2821, // 20 minutes ago, with random jitter
		baseTime-300*1000*3,      // 15 minutes ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})
	defaultClientConfig.HttpClient = fakeHttpClient
	defaultClientConfig.TimeSource = timeSource

	b := NewBlueflood(defaultClientConfig)

	seriesList, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod:   api.SampleMean,
		Timerange:      queryTimerange,
		MetricMetadata: fakeApi,
		Cancellable:    api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}

	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}
func TestIncludeRawPayload(t *testing.T) {
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.value"): {
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}

	fakeApi := mocks.NewFakeMetricMetadataAPI()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		util.GraphiteMetric("some.key.value"),
		&graphite,
	)

	now := time.Unix(1438734300000, 0)
	baseTime := now.Unix() * 1000
	timeSource := func() time.Time { return now }

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	// The queries have to be relative to "now".
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   14400,
		GraphiteMetricConverter: &graphite,
		TimeSource:              timeSource,
	}

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	regularResponse := fmt.Sprintf(`{
		"unit": "unknown",
		"values": [
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 100
			},
			{
				"numPoints": 29,
				"timestamp": %d,
				"average": 142
			},
			{
				"numPoints": 27,
				"timestamp": %d,
				"average": 138
			},
			{
				"numPoints": 28,
				"timestamp": %d,
				"average": 182
			}
		],
		"metadata": {
			"limit": null,
			"next_href": null,
			"count": 4,
			"marker": null
		}
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	// No full-resolution response is registered; this test only exercises the MIN5 query.
	defaultClientConfig.HttpClient = fakeHttpClient
	defaultClientConfig.TimeSource = timeSource

	b := NewBlueflood(defaultClientConfig)

	userConfig := api.UserSpecifiableConfig{
		IncludeRawData: true,
	}

	timeSeries, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod:          api.SampleMean,
		Timerange:             queryTimerange,
		MetricMetadata:        fakeApi,
		Cancellable:           api.NewCancellable(),
		UserSpecifiableConfig: userConfig,
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}

	if timeSeries.Raw == nil || string(timeSeries.Raw[0]) != regularResponse {
		t.Fatalf("Didn't fill in the raw result correctly, got: %s\n", string(timeSeries.Raw[0]))
	}
}
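// Both of these tests pin "now" by injecting a TimeSource into the Config, which makes
// the relative-to-now query URLs deterministic. The consuming side presumably looks
// something like this sketch (the now method and the config field access are
// assumptions for illustration, not the package's actual code):
func (b *blueflood) now() time.Time {
	if b.config.TimeSource != nil {
		return b.config.TimeSource() // tests supply a fixed clock here
	}
	return time.Now()
}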