Example #1
func TestCommand_DescribeAll(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_1", api.ParseTagSet("")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("")}, emptyGraphiteName)

	for _, test := range []struct {
		query    string
		backend  api.API
		expected []api.MetricKey
	}{
		{"describe all", fakeApi, []api.MetricKey{"series_0", "series_1", "series_2", "series_3"}},
		{"describe all match '_0'", fakeApi, []api.MetricKey{"series_0"}},
		{"describe all match '_5'", fakeApi, []api.MetricKey{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}

		a.EqString(command.Name(), "describe all")
		rawResult, err := command.Execute(ExecutionContext{Backend: nil, API: test.backend, FetchLimit: 1000, Timeout: 0})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
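All of the tests in this listing use Go's table-driven style: a slice of anonymous-struct cases walked in one loop, with a per-case context attached to the assertions. The following minimal sketch shows the same shape using only the standard library; parseQuery and its cases are hypothetical stand-ins, not part of this codebase.

package query_test

import (
	"errors"
	"strings"
	"testing"
)

// parseQuery is a hypothetical stand-in for Parse in the listings above:
// it accepts queries beginning with "describe" and rejects everything else.
func parseQuery(q string) (string, error) {
	if strings.HasPrefix(q, "describe") {
		return "describe", nil
	}
	return "", errors.New("unknown command")
}

func TestParseQuery(t *testing.T) {
	// One anonymous-struct case per query, iterated in a single loop,
	// mirroring the table-driven layout used throughout this section.
	for _, test := range []struct {
		query     string
		expectErr bool
		name      string
	}{
		{"describe all", false, "describe"},
		{"describe series_0 where dc='west'", false, "describe"},
		{"frobnicate series_0", true, ""},
	} {
		name, err := parseQuery(test.query)
		if (err != nil) != test.expectErr {
			t.Errorf("query=%q: unexpected error state: %v", test.query, err)
			continue
		}
		if name != test.name {
			t.Errorf("query=%q: got command %q, want %q", test.query, name, test.name)
		}
	}
}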
Example #2
func TestCommand_Describe(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)

	for _, test := range []struct {
		query    string
		backend  api.API
		expected map[string][]string
	}{
		{"describe series_0", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe`series_0`", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe series_0 where dc='west'", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe`series_0`where(dc='west')", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where dc='west' or env = 'production'", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeApi, map[string][]string{}},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeApi, map[string][]string{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}

		a.EqString(command.Name(), "describe")
		rawResult, _ := command.Execute(ExecutionContext{Backend: nil, API: test.backend, FetchLimit: 1000, Timeout: 0})
		a.Eq(rawResult, test.expected)
	}
}
func TestCommand_Describe(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)

	for _, test := range []struct {
		query   string
		backend api.API
		length  int // expected length of the result.
	}{
		{"describe series_0", fakeApi, 4},
		{"describe`series_0`", fakeApi, 4},
		{"describe series_0 where dc='west'", fakeApi, 2},
		{"describe`series_0`where(dc='west')", fakeApi, 2},
		{"describe series_0 where dc='west' or env = 'production'", fakeApi, 3},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeApi, 3},
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeApi, 2},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeApi, 2},
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeApi, 0},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeApi, 0},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "describe")
		rawResult, _ := command.Execute(ExecutionContext{Backend: nil, API: test.backend, FetchLimit: 1000, Timeout: 0})
		parsedResult := rawResult.([]string)
		a.EqInt(len(parsedResult), test.length)
	}
}
Example #4
func (f FakeTimeseriesStorageAPI) FetchSingleTimeseries(request api.FetchTimeseriesRequest) (api.Timeseries, error) {
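	// Profiler.Record returns a completion callback; deferring the returned
	// closure records this mock fetch under the given name (these counts are
	// asserted in TestProfilerIntegration later in this listing).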
	defer request.Profiler.Record("Mock FetchSingleTimeseries")()
	metricMap := map[api.MetricKey][]api.Timeseries{
		"series_1": {{Values: []float64{1, 2, 3, 4, 5}, TagSet: api.ParseTagSet("dc=west")}},
		"series_2": {{Values: []float64{1, 2, 3, 4, 5}, TagSet: api.ParseTagSet("dc=west")}, {Values: []float64{3, 0, 3, 6, 2}, TagSet: api.ParseTagSet("dc=east")}},
		"series_3": {{Values: []float64{1, 1, 1, 4, 4}, TagSet: api.ParseTagSet("dc=west")}, {Values: []float64{5, 5, 5, 2, 2}, TagSet: api.ParseTagSet("dc=east")}, {Values: []float64{3, 3, 3, 3, 3}, TagSet: api.ParseTagSet("dc=north")}},
	}
	if string(request.Metric.MetricKey) == "series_timeout" {
		<-make(chan struct{}) // block forever
	}
	list, ok := metricMap[request.Metric.MetricKey]
	if !ok {
		return api.Timeseries{}, errors.New("internal error")
	}
	for _, series := range list {
		if request.Metric.TagSet.Serialize() == series.TagSet.Serialize() {
			// Cut the values based on the Timerange.
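			// The canned data is laid out at 30ms resolution starting at time 0,
			// so Start()/30 is the index of the first requested slot.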
			values := make([]float64, request.Timerange.Slots())
			for i := range values {
				values[i] = series.Values[i+int(request.Timerange.Start())/30]
			}
			return api.Timeseries{Values: values, TagSet: series.TagSet}, nil
		}
	}
	return api.Timeseries{}, errors.New("internal error")
}
func Test_MetricName_GetTagSet_DB(t *testing.T) {
	a := assert.New(t)
	db := newDatabase(t)
	if db == nil {
		return
	}
	defer cleanDatabase(t, db)
	if _, err := db.GetTagSet("sample"); err == nil {
		t.Errorf("Cassandra should error on fetching nonexistent metric")
	}

	metricNamesTests := []struct {
		addTest      bool
		metricName   string
		tagString    string
		expectedTags map[string][]string // { metricName: [ tags ] }
	}{
		{true, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar1"},
		}},
		{true, "sample", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{true, "sample2", "foo=bar2", map[string][]string{
			"sample":  []string{"foo=bar1", "foo=bar2"},
			"sample2": []string{"foo=bar2"},
		}},
		{false, "sample2", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{false, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar2"},
		}},
	}

	for _, c := range metricNamesTests {
		if c.addTest {
			a.CheckError(db.AddMetricName(api.MetricKey(c.metricName), api.ParseTagSet(c.tagString)))
		} else {
			a.CheckError(db.RemoveMetricName(api.MetricKey(c.metricName), api.ParseTagSet(c.tagString)))
		}

		for k, v := range c.expectedTags {
			if tags, err := db.GetTagSet(api.MetricKey(k)); err != nil {
				t.Errorf("Error fetching tags")
			} else {
				stringTags := make([]string, len(tags))
				for i, tag := range tags {
					stringTags[i] = tag.Serialize()
				}

				a.EqInt(len(stringTags), len(v))
				sort.Strings(stringTags)
				sort.Strings(v)
				a.Eq(stringTags, v)
			}
		}
	}
}
func Test_GetAllMetrics_API(t *testing.T) {
	a := assert.New(t)
	cassandra, context := newCassandraAPI(t)
	defer cleanAPI(t, cassandra)
	a.CheckError(cassandra.AddMetric(api.TaggedMetric{
		"metric.a",
		api.ParseTagSet("foo=a"),
	}, context))
	a.CheckError(cassandra.AddMetric(api.TaggedMetric{
		"metric.a",
		api.ParseTagSet("foo=b"),
	}, context))
	a.CheckError(cassandra.AddMetrics([]api.TaggedMetric{
		{
			"metric.c",
			api.TagSet{
				"bar": "cat",
			},
		},
		{
			"metric.d",
			api.TagSet{
				"bar": "dog",
			},
		},
		{
			"metric.e",
			api.TagSet{
				"bar": "cat",
			},
		},
	}, context))
	keys, err := cassandra.GetAllMetrics(context)
	a.CheckError(err)
	sort.Sort(api.MetricKeys(keys))
	a.Eq(keys, []api.MetricKey{"metric.a", "metric.c", "metric.d", "metric.e"})
	a.CheckError(cassandra.AddMetric(api.TaggedMetric{
		"metric.b",
		api.ParseTagSet("foo=c"),
	}, context))
	a.CheckError(cassandra.AddMetric(api.TaggedMetric{
		"metric.b",
		api.ParseTagSet("foo=c"),
	}, context))
	keys, err = cassandra.GetAllMetrics(context)
	a.CheckError(err)
	sort.Sort(api.MetricKeys(keys))
	a.Eq(keys, []api.MetricKey{"metric.a", "metric.b", "metric.c", "metric.d", "metric.e"})
}
func TestCommand_Describe(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)

	for _, test := range []struct {
		query          string
		metricmetadata api.MetricMetadataAPI
		expected       map[string][]string
	}{
		{"describe series_0", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe`series_0`", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe series_0 where dc='west'", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe`series_0`where(dc='west')", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where dc='west' or env = 'production'", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeAPI, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeAPI, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeAPI, map[string][]string{}},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeAPI, map[string][]string{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}

		a.EqString(command.Name(), "describe")
		fakeTimeseriesStorage := mocks.FakeTimeseriesStorageAPI{}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeTimeseriesStorage,
			MetricMetadataAPI:         test.metricmetadata,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
func Test_GetAllMetrics(t *testing.T) {
	a := assert.New(t)
	db := newDatabase(t)
	if db == nil {
		return
	}
	defer cleanDatabase(t, db)
	a.CheckError(db.AddMetricName("metric.a", api.ParseTagSet("foo=a")))
	a.CheckError(db.AddMetricName("metric.a", api.ParseTagSet("foo=b")))
	keys, err := db.GetAllMetrics()
	a.CheckError(err)
	sort.Sort(api.MetricKeys(keys))
	a.Eq(keys, []api.MetricKey{"metric.a"})
	a.CheckError(db.AddMetricName("metric.b", api.ParseTagSet("foo=c")))
	a.CheckError(db.AddMetricName("metric.b", api.ParseTagSet("foo=c")))
	keys, err = db.GetAllMetrics()
	a.CheckError(err)
	sort.Sort(api.MetricKeys(keys))
	a.Eq(keys, []api.MetricKey{"metric.a", "metric.b"})
}
Example #9
func TestToGraphiteName_Error(t *testing.T) {
	a := assert.New(t)
	rule, err := Compile(RawRule{
		Pattern:          "prefix.%foo%",
		MetricKeyPattern: "test-metric",
	})
	a.CheckError(err)
	reversed, err := rule.ToGraphiteName(api.TaggedMetric{
		MetricKey: "test-metric",
		TagSet:    api.ParseTagSet(""),
	})
	checkConversionErrorCode(t, err, MissingTag)
	a.EqString(string(reversed), "")

	reversed, err = rule.ToGraphiteName(api.TaggedMetric{
		MetricKey: "test-metric-foo",
		TagSet:    api.ParseTagSet("foo=fooValue"),
	})
	checkConversionErrorCode(t, err, CannotInterpolate)
	a.EqString(string(reversed), "")
}
func (f fakeApiBackend) FetchSingleSeries(request api.FetchSeriesRequest) (api.Timeseries, error) {
	metricMap := map[api.MetricKey][]api.Timeseries{
		"series_1": {{[]float64{1, 2, 3, 4, 5}, api.ParseTagSet("dc=west")}},
		"series_2": {{[]float64{1, 2, 3, 4, 5}, api.ParseTagSet("dc=west")}, {[]float64{3, 0, 3, 6, 2}, api.ParseTagSet("dc=east")}},
	}
	if string(request.Metric.MetricKey) == "series_timeout" {
		<-make(chan struct{}) // block forever
	}
	list, ok := metricMap[request.Metric.MetricKey]
	if !ok {
		return api.Timeseries{}, errors.New("internal error")
	}
	for _, series := range list {
		if request.Metric.TagSet.Serialize() == series.TagSet.Serialize() {
			// Cut the values based on the Timerange.
			values := make([]float64, request.Timerange.Slots())
			for i := range values {
				values[i] = series.Values[i+int(request.Timerange.Start())/30]
			}
			return api.Timeseries{values, series.TagSet}, nil
		}
	}
	return api.Timeseries{}, errors.New("internal error")
}
Example #11
func TestToGraphiteName(t *testing.T) {
	a := assert.New(t)
	rule, err := Compile(RawRule{
		Pattern:          "prefix.%foo%",
		MetricKeyPattern: "test-metric",
	})
	a.CheckError(err)
	tm := api.TaggedMetric{
		MetricKey: "test-metric",
		TagSet:    api.ParseTagSet("foo=fooValue"),
	}
	reversed, err := rule.ToGraphiteName(tm)
	a.CheckError(err)
	a.EqString(string(reversed), "prefix.fooValue")
}
Example #12
func TestCommand_DescribeAll(t *testing.T) {
	fakeAPI := mocks.NewFakeMetricMetadataAPI()
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_0", api.ParseTagSet("")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_1", api.ParseTagSet("")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_2", api.ParseTagSet("")}, emptyGraphiteName)
	fakeAPI.AddPairWithoutGraphite(api.TaggedMetric{"series_3", api.ParseTagSet("")}, emptyGraphiteName)

	for _, test := range []struct {
		query          string
		metricmetadata api.MetricMetadataAPI
		expected       []api.MetricKey
	}{
		{"describe all", fakeAPI, []api.MetricKey{"series_0", "series_1", "series_2", "series_3"}},
		{"describe all match '_0'", fakeAPI, []api.MetricKey{"series_0"}},
		{"describe all match '_5'", fakeAPI, []api.MetricKey{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}

		a.EqString(command.Name(), "describe all")
		fakeMulti := mocks.FakeTimeseriesStorageAPI{}
		rawResult, err := command.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeMulti,
			MetricMetadataAPI:         test.metricmetadata,
			FetchLimit:                1000,
			Timeout:                   0,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
Example #13
func (db *defaultDatabase) GetTagSet(metricKey api.MetricKey) ([]api.TagSet, error) {
	var tags []api.TagSet
	rawTag := ""
	iterator := db.session.Query(
		"SELECT tag_set FROM metric_names WHERE metric_key = ?",
		metricKey,
	).Iter()
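	// Scan advances the iterator one row at a time and returns false once no
	// rows remain; Close (below) surfaces any query error.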
	for iterator.Scan(&rawTag) {
		parsedTagSet := api.ParseTagSet(rawTag)
		if parsedTagSet != nil {
			tags = append(tags, parsedTagSet)
		}
	}
	if err := iterator.Close(); err != nil {
		return nil, err
	}
	return tags, nil
}
Example #14
func (db *cassandraDatabase) GetTagSet(metricKey api.MetricKey) ([]api.TagSet, error) {
	var tags []api.TagSet
	rawTag := ""
	iterator := db.session.Query(
		"SELECT tag_set FROM metric_names WHERE metric_key = ?",
		metricKey,
	).Iter()
	for iterator.Scan(&rawTag) {
		parsedTagSet := api.ParseTagSet(rawTag)
		if parsedTagSet != nil {
			tags = append(tags, parsedTagSet)
		}
	}
	if err := iterator.Close(); err != nil {
		return nil, err
	}
	if len(tags) == 0 {
		// No tag sets were found, so report the metric as nonexistent.
		return nil, api.NewNoSuchMetricError(string(metricKey))
	}
	return tags, nil
}
func TestIncludeRawPayload(t *testing.T) {
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.value"): api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}

	fakeApi := mocks.NewFakeMetricMetadataAPI()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		util.GraphiteMetric("some.key.value"),
		&graphite,
	)

	now := time.Unix(1438734300000, 0)

	baseTime := now.Unix() * 1000
	timeSource := func() time.Time { return now }

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   14400,
		GraphiteMetricConverter: &graphite,
		TimeSource:              timeSource,
	}

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
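	// The fake client serves the canned body, delay, and status code
	// registered for a given request URL.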
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	defaultClientConfig.HttpClient = fakeHttpClient
	defaultClientConfig.TimeSource = timeSource

	b := NewBlueflood(defaultClientConfig)

	userConfig := api.UserSpecifiableConfig{
		IncludeRawData: true,
	}

	timeSeries, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod:          api.SampleMean,
		Timerange:             queryTimerange,
		MetricMetadata:        fakeApi,
		Cancellable:           api.NewCancellable(),
		UserSpecifiableConfig: userConfig,
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}

	if timeSeries.Raw == nil || string(timeSeries.Raw[0]) != regularResponse {
		t.Fatalf("Didn't fill in the raw result correctly, got: %s\n", string(timeSeries.Raw[0]))
	}
}
Example #16
func TestFullResolutionDataFilling(t *testing.T) {
	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		14400,
	}

	baseTime := 1438734300000

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)
	fullResolutionResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 13
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 16
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 19
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 27
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*6,      // 30m ago
		baseTime-300*1000*5+17,   // 25m ago, with slight jitter
		baseTime-300*1000*4+2821, // 20m ago, with slight jitter
		baseTime-300*1000*3,      // 15m ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})

	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		api.GraphiteMetric("some.key.value"),
	)

	b := NewBlueflood(defaultClientConfig).(*blueflood)
	b.client = fakeHttpClient

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod: api.SampleMean,
		Timerange:    queryTimerange,
		API:          fakeApi,
		Cancellable:  api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}
	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}
Example #17
func TestCommand_Select(t *testing.T) {
	epsilon := 1e-10
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(api.TaggedMetric{"series_1", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("dc=east")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=east")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=north")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_timeout", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	var fakeBackend fakeApiBackend
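	// fakeApiBackend's FetchSingleSeries (see the earlier listing) serves
	// canned per-tag values and blocks forever on series_timeout.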
	testTimerange, err := api.NewTimerange(0, 120, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
		return
	}
	earlyTimerange, err := api.NewTimerange(0, 60, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
	}
	lateTimerange, err := api.NewTimerange(60, 120, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
	}
	for _, test := range []struct {
		query       string
		expectError bool
		expected    api.SeriesList
	}{
		{"select does_not_exist from 0 to 120 resolution 30ms", true, api.SeriesList{}},
		{"select series_1 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{1, 2, 3, 4, 5},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: testTimerange,
			Name:      "series_1",
		}},
		{"select series_timeout from 0 to 120 resolution 30ms", true, api.SeriesList{}},
		{"select series_1 + 1 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4, 5, 6},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: testTimerange,
			Name:      "",
		}},
		{"select series_1 * 2 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 4, 6, 8, 10},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: testTimerange,
			Name:      "",
		}},
		{"select aggregate.max(series_2) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{3, 2, 3, 6, 5},
				api.NewTagSet(),
			}},
			Timerange: testTimerange,
			Name:      "series_2",
		}},
		{"select (1 + series_2) | aggregate.max from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{4, 3, 4, 7, 6},
				api.NewTagSet(),
			}},
			Timerange: testTimerange,
			Name:      "series_2",
		}},
		{"select series_1 from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{1, 2, 3},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,31ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,62ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{3, 4, 5},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,29ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,-31ms) from 60 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: lateTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,-29ms) from 60 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: lateTimerange,
			Name:      "series_1",
		}},
		{"select series_3 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(4, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(70, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_1 from -1000d to now resolution 30s", true, api.SeriesList{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		expected := test.expected
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "select")
		rawResult, err := command.Execute(ExecutionContext{
			Backend:    backend.NewSequentialMultiBackend(fakeBackend),
			API:        fakeApi,
			FetchLimit: 1000,
			Timeout:    10 * time.Millisecond,
		})
		if err != nil {
			if !test.expectError {
				a.Errorf("Unexpected error while executing: %s", err.Error())
			}
		} else {
			casted := rawResult.([]function.Value)
			actual, _ := casted[0].ToSeriesList(api.Timerange{})
			a.EqInt(len(actual.Series), len(expected.Series))
			if len(actual.Series) == len(expected.Series) {
				for i := 0; i < len(expected.Series); i++ {
					a.Eq(actual.Series[i].TagSet, expected.Series[i].TagSet)
					actualLength := len(actual.Series[i].Values)
					expectedLength := len(expected.Series[i].Values)
					a.Eq(actualLength, expectedLength)
					if actualLength == expectedLength {
						for j := 0; j < actualLength; j++ {
							a.EqFloat(actual.Series[i].Values[j], expected.Series[i].Values[j], epsilon)
						}
					}
				}
			}
		}
	}

	// Test that the limit is correct
	command, err := Parse("select series_1, series_2 from 0 to 120 resolution 30ms")
	if err != nil {
		t.Fatalf("Unexpected error while parsing")
		return
	}
	context := ExecutionContext{Backend: backend.NewSequentialMultiBackend(fakeBackend), API: fakeApi, FetchLimit: 3, Timeout: 0}
	_, err = command.Execute(context)
	if err != nil {
		t.Fatalf("expected success with limit 3 but got err = %s", err.Error())
		return
	}
	context.FetchLimit = 2
	_, err = command.Execute(context)
	if err == nil {
		t.Fatalf("expected failure with limit = 2")
		return
	}
	command, err = Parse("select series2 from 0 to 120 resolution 30ms")
	if err != nil {
		t.Fatalf("Unexpected error while parsing")
		return
	}
	_, err = command.Execute(context)
	if err != nil {
		t.Fatalf("expected success with limit = 2 but got %s", err.Error())
	}
}
Example #18
func Test_Blueflood(t *testing.T) {
	timerange, err := api.NewTimerange(12000, 13000, 1000)
	if err != nil {
		t.Fatalf("invalid testcase timerange")
		return
	}
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		0,
	}
	// Not really MIN1440, but that's what default TTLs will get with the Timerange we use
	defaultQueryUrl := "https://blueflood.url/v2.0/square/views/some.key.graphite?from=12000&resolution=MIN1440&select=numPoints%2Caverage&to=14000"

	for _, test := range []struct {
		name               string
		metricMap          map[api.GraphiteMetric]api.TaggedMetric
		queryMetric        api.TaggedMetric
		sampleMethod       api.SampleMethod
		timerange          api.Timerange
		clientConfig       Config
		queryUrl           string
		queryResponse      string
		queryResponseCode  int
		queryDelay         time.Duration
		expectedErrorCode  api.BackendErrorCode
		expectedSeriesList api.Timeseries
	}{
		{
			name: "Success case",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod: api.SampleMean,
			timerange:    timerange,
			queryUrl:     defaultQueryUrl,
			clientConfig: defaultClientConfig,
			queryResponse: `{
        "unit": "unknown", 
        "values": [
          {
            "numPoints": 1,
            "timestamp": 12000,
            "average": 5
          },
          {
            "numPoints": 1,
            "timestamp": 13000,
            "average": 3
          }
        ],
        "metadata": {
          "limit": null,
          "next_href": null,
          "count": 2,
          "marker": null
        }
      }`,
			expectedSeriesList: api.Timeseries{
				Values: []float64{5, 3},
				TagSet: api.ParseTagSet("tag=value"),
			},
		},
		{
			name: "Failure case - invalid JSON",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{invalid}`,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - HTTP error",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryResponseCode: 400,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - timeout",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryDelay:        1 * time.Second,
			expectedErrorCode: api.FetchTimeoutError,
		},
	} {
		a := assert.New(t).Contextf("%s", test.name)

		fakeApi := mocks.NewFakeApi()
		for k, v := range test.metricMap {
			fakeApi.AddPair(v, k)
		}

		fakeHttpClient := mocks.NewFakeHttpClient()
		code := test.queryResponseCode
		if code == 0 {
			code = http.StatusOK
		}
		fakeHttpClient.SetResponse(test.queryUrl, mocks.Response{test.queryResponse, test.queryDelay, code})

		b := NewBlueflood(test.clientConfig).(*blueflood)
		b.client = fakeHttpClient

		seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
			Metric:       test.queryMetric,
			SampleMethod: test.sampleMethod,
			Timerange:    test.timerange,
			API:          fakeApi,
			Cancellable:  api.NewCancellable(),
		})

		if test.expectedErrorCode != 0 {
			if err == nil {
				a.Errorf("Expected error, but was successful.")
				continue
			}
			berr, ok := err.(api.BackendError)
			if !ok {
				a.Errorf("Failed to cast error to BackendError")
				continue
			}
			a.Eq(berr.Code, test.expectedErrorCode)
		} else {
			if err != nil {
				a.CheckError(err)
				continue
			}
			a.Eq(seriesList, test.expectedSeriesList)
		}
	}
}
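The Blueflood tests in this listing stub HTTP by registering canned mocks.Response values per URL on a fake client. As an illustrative alternative only (an assumption, not how this codebase is wired), the standard library's httptest package can serve the same canned body from a real local server:

package blueflood_test

import (
	"io"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestCannedHTTPResponse(t *testing.T) {
	// A throwaway server that answers every request with a fixed JSON body.
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		io.WriteString(w, `{"unit": "unknown", "values": []}`)
	}))
	defer server.Close()

	// server.URL would stand in for the hard-coded "https://blueflood.url" base URL.
	resp, err := http.Get(server.URL + "/v2.0/square/views/some.key.value")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("expected 200 OK, got %d", resp.StatusCode)
	}
}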
func Test_MetricName_GetTagSet_API(t *testing.T) {
	a := assert.New(t)
	cassandra, context := newCassandraAPI(t)
	defer cleanAPI(t, cassandra)

	if _, err := cassandra.GetAllTags("sample", context); err == nil {
		t.Errorf("Cassandra API should error on fetching nonexistent metric")
	}

	metricNamesTests := []struct {
		addTest      bool
		metricName   string
		tagString    string
		expectedTags map[string][]string // { metricName: [ tags ] }
	}{
		{true, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar1"},
		}},
		{true, "sample", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{true, "sample2", "foo=bar2", map[string][]string{
			"sample":  []string{"foo=bar1", "foo=bar2"},
			"sample2": []string{"foo=bar2"},
		}},
		{false, "sample2", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{false, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar2"},
		}},
	}

	for _, c := range metricNamesTests {
		if c.addTest {
			a.CheckError(cassandra.AddMetric(api.TaggedMetric{
				api.MetricKey(c.metricName),
				api.ParseTagSet(c.tagString),
			}, context))
		} else {
			a.CheckError(cassandra.RemoveMetric(api.TaggedMetric{
				api.MetricKey(c.metricName),
				api.ParseTagSet(c.tagString),
			}, context))
		}

		for k, v := range c.expectedTags {
			if tags, err := cassandra.GetAllTags(api.MetricKey(k), context); err != nil {
				t.Errorf("Error fetching tags")
			} else {
				stringTags := make([]string, len(tags))
				for i, tag := range tags {
					stringTags[i] = tag.Serialize()
				}

				a.EqInt(len(stringTags), len(v))
				sort.Strings(stringTags)
				sort.Strings(v)
				a.Eq(stringTags, v)
			}
		}
	}
}
Example #20
func TestProfilerIntegration(t *testing.T) {
	myAPI := mocks.NewFakeMetricMetadataAPI()
	fakeTimeStorage := mocks.FakeTimeseriesStorageAPI{}

	emptyGraphiteName := util.GraphiteMetric("")
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=1,y=2")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=2,y=2")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"A", api.ParseTagSet("x=3,y=1")}, emptyGraphiteName)

	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"B", api.ParseTagSet("q=foo")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"B", api.ParseTagSet("q=bar")}, emptyGraphiteName)

	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=1")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=2")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=3")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=4")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=5")}, emptyGraphiteName)
	myAPI.AddPairWithoutGraphite(api.TaggedMetric{"C", api.ParseTagSet("c=6")}, emptyGraphiteName)

	testCases := []struct {
		query    string
		expected map[string]int
	}{
		{
			query: "describe all",
			expected: map[string]int{
				"describe all.Execute": 1,
				"Mock GetAllMetrics":   1,
			},
		},
		{
			query: "select A from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 1,
				"Mock GetAllTags":              1,
				"Mock FetchSingleTimeseries":   3,
			},
		},
		{
			query: "select A+A from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 2,
				"Mock GetAllTags":              2,
				"Mock FetchSingleTimeseries":   6,
			},
		},
		{
			query: "select A+2 from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 1,
				"Mock GetAllTags":              1,
				"Mock FetchSingleTimeseries":   3,
			},
		},
		{
			query: "select A where y = '2' from 0 to 0",
			expected: map[string]int{
				"select.Execute":               1,
				"Mock FetchMultipleTimeseries": 1,
				"Mock GetAllTags":              1,
				"Mock FetchSingleTimeseries":   2,
			},
		},
		{
			query: "describe A",
			expected: map[string]int{
				"describe.Execute": 1,
				"Mock GetAllTags":  1,
			},
		},
		{
			query: "describe metrics where y='2'",
			expected: map[string]int{
				"describe metrics.Execute": 1,
				"Mock GetMetricsForTag":    1,
			},
		},
		{
			query: "describe all",
			expected: map[string]int{
				"describe all.Execute": 1,
				"Mock GetAllMetrics":   1,
			},
		},
	}

	for _, test := range testCases {
		cmd, err := Parse(test.query)
		if err != nil {
			t.Error(err.Error())
			continue
		}
		profilingCommand, profiler := NewProfilingCommand(cmd)
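		// The wrapped command records each named operation (its own Execute
		// plus every mock API call) in profiler; the counts are compared
		// against the expected map in the table above.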

		_, err = profilingCommand.Execute(ExecutionContext{
			TimeseriesStorageAPI:      fakeTimeStorage,
			MetricMetadataAPI:         myAPI,
			FetchLimit:                10000,
			Timeout:                   time.Second * 4,
			OptimizationConfiguration: optimize.NewOptimizationConfiguration(),
		})

		if err != nil {
			t.Fatal(err.Error())
		}
		list := profiler.All()
		counts := map[string]int{}
		for _, node := range list {
			counts[node.Name()]++
		}

		if len(test.expected) != len(counts) {
			t.Errorf("The number of calls doesn't match the expected amount.")
			t.Errorf("Expected %+v, but got %+v", test.expected, counts)
		}

		for name, count := range test.expected {
			if counts[name] != count {
				t.Errorf("Expected `%s` to have %d occurrences, but had %d\n", name, count, counts[name])
				t.Errorf("Expected: %+v\nBut got: %+v\n", test.expected, counts)
				break
			}
		}

	}

}
func TestFullResolutionDataFilling(t *testing.T) {
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.value"): api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}

	fakeApi := mocks.NewFakeMetricMetadataAPI()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		util.GraphiteMetric("some.key.value"),
		&graphite,
	)

	now := time.Unix(1438734300000, 0)

	baseTime := now.Unix() * 1000
	timeSource := func() time.Time { return now }

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   14400,
		GraphiteMetricConverter: &graphite,
		TimeSource:              timeSource,
	}

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)
	fullResolutionResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 13
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 16
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 19
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 27
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*6,      // 30m ago
		baseTime-300*1000*5+17,   // 25m ago, with slight jitter
		baseTime-300*1000*4+2821, // 20m ago, with slight jitter
		baseTime-300*1000*3,      // 15m ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})
	defaultClientConfig.HttpClient = fakeHttpClient
	defaultClientConfig.TimeSource = timeSource

	b := NewBlueflood(defaultClientConfig)

	seriesList, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod:   api.SampleMean,
		Timerange:      queryTimerange,
		MetricMetadata: fakeApi,
		Cancellable:    api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}
	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}