// Execute performs the query represented by the given query string, and returs the result. func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) { timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution) if err != nil { return nil, err } hasTimeout := context.Timeout != 0 var cancellable api.Cancellable if hasTimeout { cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout)) } else { cancellable = api.NewCancellable() } r := context.Registry if r == nil { r = registry.Default() } defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled. evaluationContext := function.EvaluationContext{ API: context.API, FetchLimit: function.NewFetchCounter(context.FetchLimit), MultiBackend: context.Backend, Predicate: cmd.predicate, SampleMethod: cmd.context.SampleMethod, Timerange: timerange, Cancellable: cancellable, Profiler: context.Profiler, Registry: r, } if hasTimeout { timeout := time.After(context.Timeout) results := make(chan interface{}) errors := make(chan error) go func() { result, err := evaluateExpressions(evaluationContext, cmd.expressions) if err != nil { errors <- err } else { results <- result } }() select { case <-timeout: return nil, fmt.Errorf("Timeout while executing the query.") // timeout. case result := <-results: return result, nil case err := <-errors: return nil, err } } else { return evaluateExpressions(evaluationContext, cmd.expressions) } }
// newDerivativeBasedTransform returns a function.MetricFunction that performs
// a delta between two data points. The transformer parameter is a function of
// type transform; it is expected to return an array of values whose length is
// 1 less than the given series (one delta per adjacent pair of points).
func newDerivativeBasedTransform(name string, transformer transform) function.MetricFunction {
	return function.MetricFunction{
		Name:         "transform." + name,
		MinArguments: 1,
		MaxArguments: 1,
		Compute: func(context *function.EvaluationContext, arguments []function.Expression, groups function.Groups) (function.Value, error) {
			var err error
			// Calculate the new timerange to include one extra point to the left,
			// so the transform has a previous value for the first requested slot.
			newContext := context.Copy()
			timerange := context.Timerange
			newContext.Timerange, err = api.NewSnappedTimerange(timerange.Start()-timerange.ResolutionMillis(), timerange.End(), timerange.ResolutionMillis())
			if err != nil {
				return nil, err
			}
			// The new context has a timerange which is extended beyond the query's.
			listValue, err := arguments[0].Evaluate(&newContext)
			if err != nil {
				return nil, err
			}
			// This value must be a SeriesList.
			list, err := listValue.ToSeriesList(newContext.Timerange)
			if err != nil {
				return nil, err
			}
			// Reset the timerange to the caller's original (un-extended) range.
			list.Timerange = context.Timerange
			// Preserve any notes generated during the extended evaluation, then
			// retire the copied context so it cannot be used accidentally.
			context.CopyNotesFrom(&newContext)
			newContext.Invalidate() // Prevent leaking this around.
			// Apply the original context to the transform even though the list
			// will include one additional data point.
			result, err := ApplyTransform(context, list, transformer, []function.Value{})
			if err != nil {
				return nil, err
			}
			// Validate our series are the correct length. A mismatch is a
			// programmer error in the transformer, hence panic rather than error.
			for i := range result.Series {
				if len(result.Series[i].Values) != len(list.Series[i].Values)-1 {
					panic(fmt.Sprintf("Expected transform to return %d values, received %d", len(list.Series[i].Values)-1, len(result.Series[i].Values)))
				}
			}
			result.Query = fmt.Sprintf("transform.%s(%s)", name, listValue.GetName())
			result.Name = result.Query
			return result, nil
		},
	}
}
// Execute performs the query represented by the given query string, and returs the result. func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) { timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution) if err != nil { return nil, err } slotLimit := context.SlotLimit defaultLimit := 1000 if slotLimit == 0 { slotLimit = defaultLimit // the default limit } smallestResolution := timerange.Duration() / time.Duration(slotLimit-2) // ((end + res/2) - (start - res/2)) / res + 1 <= slots // make adjustments for a snap that moves the endpoints // (do some algebra) // (end - start + res) + res <= slots * res // end - start <= res * (slots - 2) // so // res >= (end - start) / (slots - 2) // Update the timerange by applying the insights of the storage API: chosenResolution := context.TimeseriesStorageAPI.ChooseResolution(timerange, smallestResolution) chosenTimerange, err := api.NewSnappedTimerange(timerange.Start(), timerange.End(), int64(chosenResolution/time.Millisecond)) if err != nil { return nil, err } if chosenTimerange.Slots() > slotLimit { return nil, function.NewLimitError( "Requested number of data points exceeds the configured limit", chosenTimerange.Slots(), slotLimit) } hasTimeout := context.Timeout != 0 var cancellable api.Cancellable if hasTimeout { cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout)) } else { cancellable = api.NewCancellable() } r := context.Registry if r == nil { r = registry.Default() } defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled. 
evaluationContext := function.EvaluationContext{ MetricMetadataAPI: context.MetricMetadataAPI, FetchLimit: function.NewFetchCounter(context.FetchLimit), TimeseriesStorageAPI: context.TimeseriesStorageAPI, Predicate: cmd.predicate, SampleMethod: cmd.context.SampleMethod, Timerange: timerange, Cancellable: cancellable, Registry: r, Profiler: context.Profiler, OptimizationConfiguration: context.OptimizationConfiguration, } if hasTimeout { timeout := time.After(context.Timeout) results := make(chan interface{}) errors := make(chan error) go func() { result, err := function.EvaluateMany(evaluationContext, cmd.expressions) if err != nil { errors <- err } else { results <- result } }() select { case <-timeout: return nil, function.NewLimitError("Timeout while executing the query.", context.Timeout, context.Timeout) case result := <-results: return result, nil case err := <-errors: return nil, err } } else { values, err := function.EvaluateMany(evaluationContext, cmd.expressions) if err != nil { return nil, err } lists := make([]api.SeriesList, len(values)) for i := range values { lists[i], err = values[i].ToSeriesList(evaluationContext.Timerange) if err != nil { return nil, err } } return lists, nil } }
// Execute performs the query represented by the given query string, and returs the result. func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) { timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution) if err != nil { return nil, err } slotLimit := context.SlotLimit defaultLimit := 1000 if slotLimit == 0 { slotLimit = defaultLimit // the default limit } if timerange.Slots() > slotLimit { return nil, function.NewLimitError( "Requested number of data points exceeds the configured limit", timerange.Slots(), slotLimit) } hasTimeout := context.Timeout != 0 var cancellable api.Cancellable if hasTimeout { cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout)) } else { cancellable = api.NewCancellable() } r := context.Registry if r == nil { r = registry.Default() } defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled. evaluationContext := function.EvaluationContext{ API: context.API, FetchLimit: function.NewFetchCounter(context.FetchLimit), MultiBackend: context.Backend, Predicate: cmd.predicate, SampleMethod: cmd.context.SampleMethod, Timerange: timerange, Cancellable: cancellable, Profiler: context.Profiler, Registry: r, } if hasTimeout { timeout := time.After(context.Timeout) results := make(chan interface{}) errors := make(chan error) go func() { result, err := function.EvaluateMany(evaluationContext, cmd.expressions) if err != nil { errors <- err } else { results <- result } }() select { case <-timeout: return nil, function.NewLimitError("Timeout while executing the query.", context.Timeout, context.Timeout) case result := <-results: return result, nil case err := <-errors: return nil, err } } else { values, err := function.EvaluateMany(evaluationContext, cmd.expressions) if err != nil { return nil, err } lists := make([]api.SeriesList, len(values)) for i := range values { lists[i], err = 
values[i].ToSeriesList(evaluationContext.Timerange) if err != nil { return nil, err } } return lists, nil } }
// FetchSingleTimeseries fetches one timeseries from Blueflood at the resolution
// chosen for the request's timerange, then fetches a short window of
// FULL-resolution data to fill in points that Blueflood's rollups may not have
// produced yet. Errors in the FULL-resolution fetch are logged and ignored so
// they never fail the primary request.
func (b *Blueflood) FetchSingleTimeseries(request api.FetchTimeseriesRequest) (api.Timeseries, error) {
	defer request.Profiler.RecordWithDescription("Blueflood FetchSingleTimeseries", request.Metric.String())()
	sampler, ok := samplerMap[request.SampleMethod]
	if !ok {
		return api.Timeseries{}, fmt.Errorf("unsupported SampleMethod %s", request.SampleMethod.String())
	}
	queryResolution := b.config.bluefloodResolution(
		request.Timerange.Resolution(),
		request.Timerange.Start(),
	)
	log.Debugf("Blueflood resolution: %s\n", queryResolution.String())
	// Sample the data at the given `queryResolution`
	queryUrl, err := b.constructURL(request, sampler, queryResolution)
	if err != nil {
		return api.Timeseries{}, err
	}
	// rawResults[0] is the primary response; the FULL-resolution response (if
	// any) is appended by the closure below, which mutates this slice by capture.
	rawResults := make([][]byte, 1)
	parsedResult, rawResult, err := b.fetch(request, queryUrl)
	rawResults[0] = rawResult
	if err != nil {
		return api.Timeseries{}, err
	}
	// combinedResult contains the requested data, along with higher-resolution data intended to fill in gaps.
	combinedResult := parsedResult.Values
	// Sample the data at the FULL resolution.
	// We clip the timerange so that it's only #{config.FullResolutionOverlap} seconds long.
	// This limits the amount of data to be fetched.
	fullResolutionParsedResult := func() []metricPoint {
		// If an error occurs, we just return nothing. We don't return the error.
		// This is so that errors while fetching the FULL-resolution data don't impact the requested data.
		fullResolutionRequest := request // Copy the request
		if request.Timerange.End()-request.Timerange.Start() > b.config.FullResolutionOverlap*1000 {
			// Clip the timerange
			newTimerange, err := api.NewSnappedTimerange(request.Timerange.End()-b.config.FullResolutionOverlap*1000, request.Timerange.End(), request.Timerange.ResolutionMillis())
			if err != nil {
				log.Infof("FULL resolution data errored while building timerange: %s", err.Error())
				return nil
			}
			fullResolutionRequest.Timerange = newTimerange
		}
		fullResolutionQueryURL, err := b.constructURL(fullResolutionRequest, sampler, ResolutionFull)
		if err != nil {
			log.Infof("FULL resolution data errored while building url: %s", err.Error())
			return nil
		}
		// NOTE(review): this passes the original `request` (not
		// fullResolutionRequest) alongside the FULL-resolution URL — presumably
		// only the URL determines what is fetched; confirm b.fetch does not use
		// request.Timerange.
		fullResolutionParsedResult, rawResult, err := b.fetch(request, fullResolutionQueryURL)
		rawResults = append(rawResults, rawResult)
		if err != nil {
			log.Infof("FULL resolution data errored while parsing result: %s", err.Error())
			return nil
		}
		// The higher-resolution data will likely overlap with the requested data.
		// This isn't a problem - the requested, higher-resolution data will be downsampled by this code.
		// This downsampling should arrive at the same answer as Blueflood's built-in rollups.
		return fullResolutionParsedResult.Values
	}()
	combinedResult = append(combinedResult, fullResolutionParsedResult...)
	values := processResult(combinedResult, request.Timerange, sampler, queryResolution)
	log.Debugf("Constructed timeseries from result: %v", values)
	if request.UserSpecifiableConfig.IncludeRawData {
		return api.Timeseries{
			Values: values,
			TagSet: request.Metric.TagSet,
			Raw:    rawResults,
		}, nil
	} else {
		return api.Timeseries{
			Values: values,
			TagSet: request.Metric.TagSet,
		}, nil
	}
}
// NOTE(review): fragment — this chunk begins and ends mid-function; the
// enclosing definition (it reads `sizeValue`, `context`, and `arguments`,
// presumably the Compute closure of a moving-window/moving-average metric
// function) is not visible here, so it is documented in place only.
if err != nil {
	return nil, err
}
size, err := sizeValue.ToDuration()
if err != nil {
	return nil, err
}
// Convert the requested window duration into a whole number of slots,
// rounding to nearest (+0.5 before truncation).
limit := int(float64(size)/float64(context.Timerange.Resolution()) + 0.5) // Limit is the number of items to include in the average
if limit < 1 {
	// At least one value must be included at all times
	limit = 1
}
// Extend the timerange to the left by (limit-1) slots so the first requested
// slot has a full window of history available.
newContext := context
timerange := context.Timerange
newContext.Timerange, err = api.NewSnappedTimerange(timerange.Start()-int64(limit-1)*timerange.ResolutionMillis(), timerange.End(), timerange.ResolutionMillis())
if err != nil {
	return nil, err
}
// The new context has a timerange which is extended beyond the query's.
listValue, err := arguments[0].Evaluate(newContext)
if err != nil {
	return nil, err
}
// This value must be a SeriesList.
list, err := listValue.ToSeriesList(newContext.Timerange)
if err != nil {
	return nil, err
}
// TestFullResolutionDataFilling verifies that FetchSingleSeries merges the
// rolled-up (MIN5) response with the FULL-resolution response: rollup values
// win where present, and FULL-resolution values fill the remaining slots.
func TestFullResolutionDataFilling(t *testing.T) {
	// The queries have to be relative to "now"
	// NOTE(review): positional Config literal — field meanings inferred from the
	// later named-field version: BaseUrl, TenantId, Ttls, Timeout,
	// FullResolutionOverlap. Confirm against the Config declaration.
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		14400,
	}
	// Millisecond epoch used as the fixed "now" for this test.
	baseTime := 1438734300000
	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)
	// Rolled-up data covers only the first four slots (50m..35m ago).
	regularResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 100 }, { "numPoints": 29, "timestamp": %d, "average": 142 }, { "numPoints": 27, "timestamp": %d, "average": 138 }, { "numPoints": 28, "timestamp": %d, "average": 182 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)
	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)
	// FULL-resolution data covers the later slots (30m..15m ago), with slightly
	// jittered timestamps to exercise slot snapping.
	fullResolutionResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 13 }, { "numPoints": 29, "timestamp": %d, "average": 16 }, { "numPoints": 27, "timestamp": %d, "average": 19 }, { "numPoints": 28, "timestamp": %d, "average": 27 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`,
		baseTime-300*1000*6,      // 30m ago
		baseTime-300*1000*5+17,   // 25m ago with random shuffling
		baseTime-300*1000*4+2821, // 20m ago with random shuffling
		baseTime-300*1000*3,      // 15m ago
	)
	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		api.GraphiteMetric("some.key.value"),
	)
	b := NewBlueflood(defaultClientConfig).(*blueflood)
	b.client = fakeHttpClient
	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}
	seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod: api.SampleMean,
		Timerange:    queryTimerange,
		API:          fakeApi,
		Cancellable:  api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}
	// First four slots come from the rollup; the last three from FULL resolution.
	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}
// Execute performs the query represented by the given query string, and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (CommandResult, error) {
	userTimerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return CommandResult{}, err
	}
	slotLimit := context.SlotLimit
	defaultLimit := 1000
	if slotLimit == 0 {
		slotLimit = defaultLimit // the default limit
	}
	smallestResolution := userTimerange.Duration() / time.Duration(slotLimit-2)
	// ((end + res/2) - (start - res/2)) / res + 1 <= slots // make adjustments for a snap that moves the endpoints
	// (do some algebra)
	// (end - start + res) + res <= slots * res
	// end - start <= res * (slots - 2)
	// so
	// res >= (end - start) / (slots - 2)
	// Update the timerange by applying the insights of the storage API:
	chosenResolution := context.TimeseriesStorageAPI.ChooseResolution(userTimerange, smallestResolution)
	chosenTimerange, err := api.NewSnappedTimerange(userTimerange.Start(), userTimerange.End(), int64(chosenResolution/time.Millisecond))
	if err != nil {
		return CommandResult{}, err
	}
	if chosenTimerange.Slots() > slotLimit {
		return CommandResult{}, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			chosenTimerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}
	defer close(cancellable.Done()) // broadcast the finish - this ensures that the future work is cancelled.
	evaluationContext := function.EvaluationContext{
		MetricMetadataAPI:         context.MetricMetadataAPI,
		FetchLimit:                function.NewFetchCounter(context.FetchLimit),
		TimeseriesStorageAPI:      context.TimeseriesStorageAPI,
		Predicate:                 cmd.predicate,
		SampleMethod:              cmd.context.SampleMethod,
		Timerange:                 chosenTimerange,
		Cancellable:               cancellable,
		Registry:                  r,
		Profiler:                  context.Profiler,
		OptimizationConfiguration: context.OptimizationConfiguration,
		EvaluationNotes:           []string{},
		UserSpecifiableConfig:     context.UserSpecifiableConfig,
	}
	// With no timeout configured, leave the channel nil so that select case
	// never fires.
	timeout := (<-chan time.Time)(nil)
	if hasTimeout {
		// A nil channel will just block forever
		timeout = time.After(context.Timeout)
	}
	results := make(chan []function.Value, 1)
	errors := make(chan error, 1) // Goroutines are never garbage collected, so we need to provide capacity so that the send always succeeds.
	go func() {
		// Evaluate the result, and send it along the goroutines.
		result, err := function.EvaluateMany(&evaluationContext, cmd.expressions)
		if err != nil {
			errors <- err
			return
		}
		results <- result
	}()
	select {
	case <-timeout:
		return CommandResult{}, function.NewLimitError("Timeout while executing the query.", context.Timeout, context.Timeout)
	case err := <-errors:
		return CommandResult{}, err
	case result := <-results:
		lists := make([]api.SeriesList, len(result))
		for i := range result {
			lists[i], err = result[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return CommandResult{}, err
			}
		}
		// Collect every tag value seen for each tag key across all result series.
		description := map[string][]string{}
		for _, list := range lists {
			for _, series := range list.Series {
				for key, value := range series.TagSet {
					description[key] = append(description[key], value)
				}
			}
		}
		// Sort each key's values naturally, then deduplicate adjacent repeats.
		for key, values := range description {
			natural_sort.Sort(values)
			filtered := []string{}
			for i := range values {
				if i == 0 || values[i-1] != values[i] {
					filtered = append(filtered, values[i])
				}
			}
			description[key] = filtered
		}
		return CommandResult{
			Body: lists,
			Metadata: map[string]interface{}{
				"description": description,
				"notes":       evaluationContext.EvaluationNotes,
			},
		}, nil
	}
}
func TestBlueflood_ChooseResolution(t *testing.T) { makeTimerange := func(start, end, resolution int64) api.Timerange { timerange, err := api.NewSnappedTimerange(start, end, resolution) if err != nil { t.Fatalf("error creating testcase timerange: %s", err.Error()) } return timerange } // The millisecond epoch for Sep 1, 2001. start := int64(999316800000) second := int64(1000) minute := 60 * second hour := 60 * minute day := 24 * hour tests := []struct { input api.Timerange slotLimit int expected time.Duration }{ { input: makeTimerange(start, start+4*hour, 30*second), slotLimit: 5000, expected: 30 * time.Second, }, { input: makeTimerange(start, start+4*hour, 30*second), slotLimit: 50, expected: 5 * time.Minute, }, { input: makeTimerange(start, start+4*hour, 30*second), slotLimit: 470, expected: 5 * time.Minute, }, { input: makeTimerange(start, start+40*hour, 30*second), slotLimit: 500, expected: 5 * time.Minute, }, { input: makeTimerange(start, start+40*hour, 30*second), slotLimit: 4700, expected: 5 * time.Minute, }, { input: makeTimerange(start, start+40*hour, 30*second), slotLimit: 110, expected: 1 * time.Hour, }, { input: makeTimerange(start, start+70*day, 30*second), slotLimit: 200, expected: 24 * time.Hour, }, { input: makeTimerange(start-25*day, start, 30*second), slotLimit: 200, expected: 24 * time.Hour, }, } b := &Blueflood{ config: Config{ Ttls: map[string]int64{ "FULL": 1, "MIN5": 30, "MIN20": 60, "MIN60": 90, "MIN240": 20, "MIN1440": 365, }, }, timeSource: func() time.Time { return time.Unix(start/1000, 0) }, } for i, test := range tests { smallestResolution := test.input.Duration() / time.Duration(test.slotLimit-2) result := b.ChooseResolution(test.input, smallestResolution) // This is mostly a sanity check: _, err := api.NewSnappedTimerange(test.input.Start(), test.input.End(), int64(result/time.Millisecond)) if err != nil { t.Errorf("Test %+v:\nEncountered error when building timerange: %s", test, err.Error()) } if result != test.expected { 
t.Errorf("Testcase %d failed: expected %+v but got %+v; slot limit %d", i, test.expected, result, test.slotLimit) } } }
func TestFullResolutionDataFilling(t *testing.T) { graphite := mocks.FakeGraphiteConverter{ MetricMap: map[util.GraphiteMetric]api.TaggedMetric{ util.GraphiteMetric("some.key.value"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, } fakeApi := mocks.NewFakeMetricMetadataAPI() fakeApi.AddPair( api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, util.GraphiteMetric("some.key.value"), &graphite, ) now := time.Unix(1438734300000, 0) baseTime := now.Unix() * 1000 timeSource := func() time.Time { return now } queryTimerange, err := api.NewSnappedTimerange( int64(baseTime)-300*1000*10, // 50 minutes ago int64(baseTime)-300*1000*4, // 20 minutes ago 300*1000, // 5 minute resolution ) // The queries have to be relative to "now" defaultClientConfig := Config{ BaseUrl: "https://blueflood.url", TenantId: "square", Ttls: make(map[string]int64), Timeout: time.Millisecond, FullResolutionOverlap: 14400, GraphiteMetricConverter: &graphite, TimeSource: timeSource, } regularQueryURL := fmt.Sprintf( "https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d", queryTimerange.Start(), queryTimerange.End()+queryTimerange.ResolutionMillis(), ) regularResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 100 }, { "numPoints": 29, "timestamp": %d, "average": 142 }, { "numPoints": 27, "timestamp": %d, "average": 138 }, { "numPoints": 28, "timestamp": %d, "average": 182 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`, baseTime-300*1000*10, // 50 minutes ago baseTime-300*1000*9, // 45 minutes ago baseTime-300*1000*8, // 40 minutes ago baseTime-300*1000*7, // 35 minutes ago ) fullResolutionQueryURL := fmt.Sprintf( "https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d", queryTimerange.Start(), 
queryTimerange.End()+queryTimerange.ResolutionMillis(), ) fullResolutionResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 13 }, { "numPoints": 29, "timestamp": %d, "average": 16 }, { "numPoints": 27, "timestamp": %d, "average": 19 }, { "numPoints": 28, "timestamp": %d, "average": 27 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`, baseTime-300*1000*6, // 30m ago baseTime-300*1000*5+17, // 25m ago with random shuffling baseTime-300*1000*4+2821, // 20m ago with random shuffling baseTime-300*1000*3, // 15m ago ) fakeHttpClient := mocks.NewFakeHttpClient() fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK}) fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK}) defaultClientConfig.HttpClient = fakeHttpClient defaultClientConfig.TimeSource = timeSource b := NewBlueflood(defaultClientConfig) if err != nil { t.Fatalf("timerange error: %s", err.Error()) } seriesList, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{ Metric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, SampleMethod: api.SampleMean, Timerange: queryTimerange, MetricMetadata: fakeApi, Cancellable: api.NewCancellable(), }) if err != nil { t.Fatalf("Expected success, but got error: %s", err.Error()) } expected := []float64{100, 142, 138, 182, 13, 16, 19} if len(seriesList.Values) != len(expected) { t.Fatalf("Expected %+v but got %+v", expected, seriesList) } for i, expect := range expected { if seriesList.Values[i] != expect { t.Fatalf("Expected %+v but got %+v", expected, seriesList) } } }
func TestIncludeRawPayload(t *testing.T) { graphite := mocks.FakeGraphiteConverter{ MetricMap: map[util.GraphiteMetric]api.TaggedMetric{ util.GraphiteMetric("some.key.value"): api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, }, } fakeApi := mocks.NewFakeMetricMetadataAPI() fakeApi.AddPair( api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, util.GraphiteMetric("some.key.value"), &graphite, ) now := time.Unix(1438734300000, 0) baseTime := now.Unix() * 1000 timeSource := func() time.Time { return now } queryTimerange, err := api.NewSnappedTimerange( int64(baseTime)-300*1000*10, // 50 minutes ago int64(baseTime)-300*1000*4, // 20 minutes ago 300*1000, // 5 minute resolution ) // The queries have to be relative to "now" defaultClientConfig := Config{ BaseUrl: "https://blueflood.url", TenantId: "square", Ttls: make(map[string]int64), Timeout: time.Millisecond, FullResolutionOverlap: 14400, GraphiteMetricConverter: &graphite, TimeSource: timeSource, } regularQueryURL := fmt.Sprintf( "https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d", queryTimerange.Start(), queryTimerange.End()+queryTimerange.ResolutionMillis(), ) regularResponse := fmt.Sprintf(`{ "unit": "unknown", "values": [ { "numPoints": 28, "timestamp": %d, "average": 100 }, { "numPoints": 29, "timestamp": %d, "average": 142 }, { "numPoints": 27, "timestamp": %d, "average": 138 }, { "numPoints": 28, "timestamp": %d, "average": 182 } ], "metadata": { "limit": null, "next_href": null, "count": 4, "marker": null } }`, baseTime-300*1000*10, // 50 minutes ago baseTime-300*1000*9, // 45 minutes ago baseTime-300*1000*8, // 40 minutes ago baseTime-300*1000*7, // 35 minutes ago ) fakeHttpClient := mocks.NewFakeHttpClient() fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK}) // fakeHttpClient.SetResponse(fullResolutionQueryURL, 
mocks.Response{fullResolutionResponse, 0, http.StatusOK}) defaultClientConfig.HttpClient = fakeHttpClient defaultClientConfig.TimeSource = timeSource b := NewBlueflood(defaultClientConfig) if err != nil { t.Fatalf("timerange error: %s", err.Error()) } userConfig := api.UserSpecifiableConfig{ IncludeRawData: true, } timeSeries, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{ Metric: api.TaggedMetric{ MetricKey: api.MetricKey("some.key"), TagSet: api.ParseTagSet("tag=value"), }, SampleMethod: api.SampleMean, Timerange: queryTimerange, MetricMetadata: fakeApi, Cancellable: api.NewCancellable(), UserSpecifiableConfig: userConfig, }) if err != nil { t.Fatalf("Expected success, but got error: %s", err.Error()) } if timeSeries.Raw == nil || string(timeSeries.Raw[0]) != regularResponse { t.Fatalf("Didn't fill in the raw result correctly, got: %s\n", string(timeSeries.Raw[0])) } }