func TestCommand_Describe(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
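	// Register four series_0 entries with distinct dc/env/host tag combinations
	// so the where-clause filters below have something to match against.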
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)

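	// Each case pairs a describe query with the number of tagsets it should return.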
	for _, test := range []struct {
		query   string
		backend api.API
		length  int // expected length of the result.
	}{
		{"describe series_0", fakeApi, 4},
		{"describe`series_0`", fakeApi, 4},
		{"describe series_0 where dc='west'", fakeApi, 2},
		{"describe`series_0`where(dc='west')", fakeApi, 2},
		{"describe series_0 where dc='west' or env = 'production'", fakeApi, 3},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeApi, 3},
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeApi, 2},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeApi, 2},
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeApi, 0},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeApi, 0},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "describe")
		rawResult, _ := command.Execute(ExecutionContext{Backend: nil, API: test.backend, FetchLimit: 1000, Timeout: 0})
		parsedResult := rawResult.([]string)
		a.EqInt(len(parsedResult), test.length)
	}
}
Example #2
func TestCommand_DescribeAll(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
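	// Register four distinct metrics, each with an empty tagset.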
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_1", api.ParseTagSet("")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("")}, emptyGraphiteName)

	for _, test := range []struct {
		query    string
		backend  api.API
		expected []api.MetricKey
	}{
		{"describe all", fakeApi, []api.MetricKey{"series_0", "series_1", "series_2", "series_3"}},
		{"describe all match '_0'", fakeApi, []api.MetricKey{"series_0"}},
		{"describe all match '_5'", fakeApi, []api.MetricKey{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}

		a.EqString(command.Name(), "describe all")
		rawResult, err := command.Execute(ExecutionContext{Backend: nil, API: test.backend, FetchLimit: 1000, Timeout: 0})
		a.CheckError(err)
		a.Eq(rawResult, test.expected)
	}
}
Example #3
func TestCommand_Describe(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=production,host=a")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=west,env=staging,host=b")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=production,host=c")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_0", api.ParseTagSet("dc=east,env=staging,host=d")}, emptyGraphiteName)

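	// Unlike the earlier variant of this test, each case checks the full
	// tag-name-to-values map rather than just the number of matching tagsets.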
	for _, test := range []struct {
		query    string
		backend  api.API
		expected map[string][]string
	}{
		{"describe series_0", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe`series_0`", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c", "d"}}},
		{"describe series_0 where dc='west'", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe`series_0`where(dc='west')", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where dc='west' or env = 'production'", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where`dc`='west'or`env`='production'", fakeApi, map[string][]string{"dc": {"east", "west"}, "env": {"production", "staging"}, "host": {"a", "b", "c"}}},
		{"describe series_0 where dc='west' or env = 'production' and doesnotexist = ''", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where env = 'production' and doesnotexist = '' or dc = 'west'", fakeApi, map[string][]string{"dc": {"west"}, "env": {"production", "staging"}, "host": {"a", "b"}}},
		{"describe series_0 where (dc='west' or env = 'production') and doesnotexist = ''", fakeApi, map[string][]string{}},
		{"describe series_0 where(dc='west' or env = 'production')and`doesnotexist` = ''", fakeApi, map[string][]string{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}

		a.EqString(command.Name(), "describe")
		rawResult, _ := command.Execute(ExecutionContext{Backend: nil, API: test.backend, FetchLimit: 1000, Timeout: 0})
		a.Eq(rawResult, test.expected)
	}
}
func TestMovingAverage(t *testing.T) {
	fakeAPI := mocks.NewFakeApi()
	fakeAPI.AddPair(api.TaggedMetric{"series", api.NewTagSet()}, "series")

	fakeBackend := movingAverageBackend{}
	timerange, err := api.NewTimerange(1200, 1500, 100)
	if err != nil {
		t.Fatal(err)
	}

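	// Build the expression tree by hand rather than parsing a query string:
	// a 300ms moving average over the raw "series" fetch.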
	expression := &functionExpression{
		functionName: "transform.moving_average",
		groupBy:      []string{},
		arguments: []function.Expression{
			&metricFetchExpression{"series", api.TruePredicate},
			durationExpression{"300ms", 300 * time.Millisecond},
		},
	}

	result, err := evaluateToSeriesList(expression,
		function.EvaluationContext{
			API:          fakeAPI,
			MultiBackend: backend.NewSequentialMultiBackend(fakeBackend),
			Timerange:    timerange,
			SampleMethod: api.SampleMean,
			FetchLimit:   function.NewFetchCounter(1000),
			Registry:     registry.Default(),
		})
	if err != nil {
		t.Error(err)
	}

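	// Each output point is the mean of the samples inside the trailing 300ms
	// window supplied by movingAverageBackend (hence the 11.0/3 value).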
	expected := []float64{4, 3, 11.0 / 3, 5}
	if len(result.Series) != 1 {
		t.Fatalf("expected exactly 1 returned series")
	}
	if len(result.Series[0].Values) != len(expected) {
		t.Fatalf("expected exactly %d values in returned series", len(expected))
	}
	const eps = 1e-7
	for i := range expected {
		if math.Abs(result.Series[0].Values[i]-expected[i]) > eps {
			t.Fatalf("expected %+v but got %+v", expected, result.Series[0].Values)
		}
	}
}
Example #5
func TestTag(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
	fakeBackend := backend.NewSequentialMultiBackend(fakeApiBackend{})
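	// Each case applies tag.drop or tag.set to series_2 and lists the values
	// and tagsets expected on the resulting pair of timeseries.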
	tests := []struct {
		query    string
		expected api.SeriesList
	}{
		{
			query: "select series_2 | tag.drop('dc') from 0  to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{
						Values: []float64{1, 2, 3, 4, 5},
						TagSet: api.TagSet{},
					},
					{
						Values: []float64{3, 0, 3, 6, 2},
						TagSet: api.TagSet{},
					},
				},
			},
		},
		{
			query: "select series_2 | tag.drop('none') from 0  to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{
						Values: []float64{1, 2, 3, 4, 5},
						TagSet: api.TagSet{"dc": "west"},
					},
					{
						Values: []float64{3, 0, 3, 6, 2},
						TagSet: api.TagSet{"dc": "east"},
					},
				},
			},
		},
		{
			query: "select series_2 | tag.set('dc', 'north') from 0  to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{
						Values: []float64{1, 2, 3, 4, 5},
						TagSet: api.TagSet{"dc": "north"},
					},
					{
						Values: []float64{3, 0, 3, 6, 2},
						TagSet: api.TagSet{"dc": "north"},
					},
				},
			},
		},
		{
			query: "select series_2 | tag.set('none', 'north') from 0  to 120",
			expected: api.SeriesList{
				Series: []api.Timeseries{
					{
						Values: []float64{1, 2, 3, 4, 5},
						TagSet: api.TagSet{"dc": "west", "none": "north"},
					},
					{
						Values: []float64{3, 0, 3, 6, 2},
						TagSet: api.TagSet{"dc": "east", "none": "north"},
					},
				},
			},
		},
	}
	for _, test := range tests {
		command, err := Parse(test.query)
		if err != nil {
			t.Fatalf("Unexpected error while parsing")
			return
		}
		if command.Name() != "select" {
			t.Errorf("Expected select command but got %s", command.Name())
			continue
		}
		rawResult, err := command.Execute(ExecutionContext{Backend: fakeBackend, API: fakeApi, FetchLimit: 1000, Timeout: 0})
		if err != nil {
			t.Errorf("Unexpected error while execution: %s", err.Error())
			continue
		}
		seriesListList, ok := rawResult.([]api.SeriesList)
		if !ok || len(seriesListList) != 1 {
			t.Errorf("expected query `%s` to produce []value; got %+v :: %T", test.query, rawResult, rawResult)
			continue
		}
		list := seriesListList[0]
		if err != nil {
			t.Fatal(err)
		}
		a := assert.New(t)
		expectedSeries := test.expected.Series
		for i, series := range list.Series {
			a.EqFloatArray(series.Values, expectedSeries[i].Values, 1e-100)
			if !series.TagSet.Equals(expectedSeries[i].TagSet) {
				t.Errorf("expected tagset %+v but got %+v for series %d of query %s", expectedSeries[i].TagSet, series.TagSet, i, test.query)
			}
		}
	}
}
Example #6
func TestNaming(t *testing.T) {
	fakeApi := mocks.NewFakeApi()
	fakeBackend := backend.NewSequentialMultiBackend(fakeApiBackend{})
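	// Each case checks the human-readable name generated for the query's result.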
	tests := []struct {
		query    string
		expected string
	}{
		{
			query:    "select series_1 from 0 to 0",
			expected: "series_1",
		},
		{
			query:    "select series_1 + 17 from 0 to 0",
			expected: "(series_1 + 17)",
		},
		{
			query:    "select series_1 + 2342.32 from 0 to 0",
			expected: "(series_1 + 2342.32)",
		},
		{
			query:    "select series_1*17 from 0 to 0",
			expected: "(series_1 * 17)",
		},
		{
			query:    "select aggregate.sum(series_1) from 0 to 0",
			expected: "aggregate.sum(series_1)",
		},
		{
			query:    "select aggregate.sum(series_1 group by dc) from 0 to 0",
			expected: "aggregate.sum(series_1 group by dc)",
		},
		{
			query:    "select aggregate.sum(series_1 group by dc,env) from 0 to 0",
			expected: "aggregate.sum(series_1 group by dc, env)",
		},
		{
			query:    "select aggregate.sum(series_1 collapse by dc) from 0 to 0",
			expected: "aggregate.sum(series_1 collapse by dc)",
		},
		{
			query:    "select aggregate.sum(series_1 collapse by dc,env) from 0 to 0",
			expected: "aggregate.sum(series_1 collapse by dc, env)",
		},
		{
			query:    "select transform.alias(aggregate.sum(series_1 group by dc,env), 'hello') from 0 to 0",
			expected: "hello",
		},
		{
			query:    "select transform.moving_average(series_2, 2h) from 0 to 0",
			expected: "transform.moving_average(series_2, 2h)",
		},
		{
			query:    "select filter.lowest_max(series_2, 6) from 0 to 0",
			expected: "filter.lowest_max(series_2, 6)",
		},
	}
	for _, test := range tests {
		command, err := Parse(test.query)
		if err != nil {
			t.Fatalf("Unexpected error while parsing")
			return
		}
		if command.Name() != "select" {
			t.Errorf("Expected select command but got %s", command.Name())
			continue
		}
		rawResult, err := command.Execute(ExecutionContext{Backend: fakeBackend, API: fakeApi, FetchLimit: 1000, Timeout: 0})
		if err != nil {
			t.Errorf("Unexpected error while execution: %s", err.Error())
			continue
		}
		seriesListList, ok := rawResult.([]api.SeriesList)
		if !ok || len(seriesListList) != 1 {
			t.Errorf("expected query `%s` to produce []value; got %+v :: %T", test.query, rawResult, rawResult)
			continue
		}
		actual := seriesListList[0].Name
		if actual != test.expected {
			t.Errorf("Expected `%s` but got `%s` for query `%s`", test.expected, actual, test.query)
			continue
		}
	}

}
Example #7
func TestCommand_Select(t *testing.T) {
	epsilon := 1e-10
	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(api.TaggedMetric{"series_1", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("dc=east")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_2", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=east")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_3", api.ParseTagSet("dc=north")}, emptyGraphiteName)
	fakeApi.AddPair(api.TaggedMetric{"series_timeout", api.ParseTagSet("dc=west")}, emptyGraphiteName)
	var fakeBackend fakeApiBackend
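	// Three timeranges (the full window plus its early and late halves) match
	// the from/to clauses used in the queries below.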
	testTimerange, err := api.NewTimerange(0, 120, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
		return
	}
	earlyTimerange, err := api.NewTimerange(0, 60, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
	}
	lateTimerange, err := api.NewTimerange(60, 120, 30)
	if err != nil {
		t.Errorf("Invalid test timerange")
	}
	for _, test := range []struct {
		query       string
		expectError bool
		expected    api.SeriesList
	}{
		{"select does_not_exist from 0 to 120 resolution 30ms", true, api.SeriesList{}},
		{"select series_1 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{1, 2, 3, 4, 5},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: testTimerange,
			Name:      "series_1",
		}},
		{"select series_timeout from 0 to 120 resolution 30ms", true, api.SeriesList{}},
		{"select series_1 + 1 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4, 5, 6},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: testTimerange,
			Name:      "",
		}},
		{"select series_1 * 2 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 4, 6, 8, 10},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: testTimerange,
			Name:      "",
		}},
		{"select aggregate.max(series_2) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{3, 2, 3, 6, 5},
				api.NewTagSet(),
			}},
			Timerange: testTimerange,
			Name:      "series_2",
		}},
		{"select (1 + series_2) | aggregate.max from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{4, 3, 4, 7, 6},
				api.NewTagSet(),
			}},
			Timerange: testTimerange,
			Name:      "series_2",
		}},
		{"select series_1 from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{1, 2, 3},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,31ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,62ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{3, 4, 5},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,29ms) from 0 to 60 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: earlyTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,-31ms) from 60 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: lateTimerange,
			Name:      "series_1",
		}},
		{"select transform.timeshift(series_1,-29ms) from 60 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{{
				[]float64{2, 3, 4},
				api.ParseTagSet("dc=west"),
			}},
			Timerange: lateTimerange,
			Name:      "series_1",
		}},
		{"select series_3 from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(3, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(4, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(70, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(2, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(1, 30ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_highest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(3, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
				{
					[]float64{5, 5, 5, 2, 2},
					api.ParseTagSet("dc=east"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(2, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
				{
					[]float64{1, 1, 1, 4, 4},
					api.ParseTagSet("dc=west"),
				},
			},
		}},
		{"select series_3 | filter.recent_lowest_max(1, 3000ms) from 0 to 120 resolution 30ms", false, api.SeriesList{
			Series: []api.Timeseries{
				{
					[]float64{3, 3, 3, 3, 3},
					api.ParseTagSet("dc=north"),
				},
			},
		}},
		{"select series_1 from -1000d to now resolution 30s", true, api.SeriesList{}},
	} {
		a := assert.New(t).Contextf("query=%s", test.query)
		expected := test.expected
		command, err := Parse(test.query)
		if err != nil {
			a.Errorf("Unexpected error while parsing")
			continue
		}
		a.EqString(command.Name(), "select")
		rawResult, err := command.Execute(ExecutionContext{
			Backend:    backend.NewSequentialMultiBackend(fakeBackend),
			API:        fakeApi,
			FetchLimit: 1000,
			Timeout:    10 * time.Millisecond,
		})
		if err != nil {
			if !test.expectError {
				a.Errorf("Unexpected error while executing: %s", err.Error())
			}
		} else {
			casted := rawResult.([]function.Value)
			actual, _ := casted[0].ToSeriesList(api.Timerange{})
			a.EqInt(len(actual.Series), len(expected.Series))
			if len(actual.Series) == len(expected.Series) {
				for i := 0; i < len(expected.Series); i++ {
					a.Eq(actual.Series[i].TagSet, expected.Series[i].TagSet)
					actualLength := len(actual.Series[i].Values)
					expectedLength := len(expected.Series[i].Values)
					a.Eq(actualLength, expectedLength)
					if actualLength == expectedLength {
						for j := 0; j < actualLength; j++ {
							a.EqFloat(actual.Series[i].Values[j], expected.Series[i].Values[j], epsilon)
						}
					}
				}
			}
		}
	}

	// Verify that FetchLimit is enforced: series_1 and series_2 together require three fetches.
	command, err := Parse("select series_1, series_2 from 0 to 120 resolution 30ms")
	if err != nil {
		t.Fatalf("Unexpected error while parsing")
		return
	}
	context := ExecutionContext{Backend: backend.NewSequentialMultiBackend(fakeBackend), API: fakeApi, FetchLimit: 3, Timeout: 0}
	_, err = command.Execute(context)
	if err != nil {
		t.Fatalf("expected success with limit 3 but got err = %s", err.Error())
		return
	}
	context.FetchLimit = 2
	_, err = command.Execute(context)
	if err == nil {
		t.Fatalf("expected failure with limit = 2")
		return
	}
	command, err = Parse("select series_2 from 0 to 120 resolution 30ms")
	if err != nil {
		t.Fatalf("Unexpected error while parsing")
		return
	}
	_, err = command.Execute(context)
	if err != nil {
		t.Fatalf("expected success with limit = 2 but got %s", err.Error())
	}
}
Example #8
func Test_Blueflood(t *testing.T) {
	timerange, err := api.NewTimerange(12000, 13000, 1000)
	if err != nil {
		t.Fatalf("invalid testcase timerange")
		return
	}
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		0,
	}
	// Not really MIN1440, but that's what default TTLs will get with the Timerange we use
	defaultQueryUrl := "https://blueflood.url/v2.0/square/views/some.key.graphite?from=12000&resolution=MIN1440&select=numPoints%2Caverage&to=14000"

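	// Cases cover a successful fetch plus three failure modes: invalid JSON,
	// an HTTP error status, and a response that exceeds the client timeout.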
	for _, test := range []struct {
		name               string
		metricMap          map[api.GraphiteMetric]api.TaggedMetric
		queryMetric        api.TaggedMetric
		sampleMethod       api.SampleMethod
		timerange          api.Timerange
		clientConfig       Config
		queryUrl           string
		queryResponse      string
		queryResponseCode  int
		queryDelay         time.Duration
		expectedErrorCode  api.BackendErrorCode
		expectedSeriesList api.Timeseries
	}{
		{
			name: "Success case",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod: api.SampleMean,
			timerange:    timerange,
			queryUrl:     defaultQueryUrl,
			clientConfig: defaultClientConfig,
			queryResponse: `{
        "unit": "unknown", 
        "values": [
          {
            "numPoints": 1,
            "timestamp": 12000,
            "average": 5
          },
          {
            "numPoints": 1,
            "timestamp": 13000,
            "average": 3
          }
        ],
        "metadata": {
          "limit": null,
          "next_href": null,
          "count": 2,
          "marker": null
        }
      }`,
			expectedSeriesList: api.Timeseries{
				Values: []float64{5, 3},
				TagSet: api.ParseTagSet("tag=value"),
			},
		},
		{
			name: "Failure case - invalid JSON",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{invalid}`,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - HTTP error",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryResponseCode: 400,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - timeout",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryDelay:        1 * time.Second,
			expectedErrorCode: api.FetchTimeoutError,
		},
	} {
		a := assert.New(t).Contextf("%s", test.name)

		fakeApi := mocks.NewFakeApi()
		for k, v := range test.metricMap {
			fakeApi.AddPair(v, k)
		}

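		// Serve the canned response (with optional delay and status code) for
		// the expected Blueflood URL.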
		fakeHttpClient := mocks.NewFakeHttpClient()
		code := test.queryResponseCode
		if code == 0 {
			code = http.StatusOK
		}
		fakeHttpClient.SetResponse(test.queryUrl, mocks.Response{test.queryResponse, test.queryDelay, code})

		b := NewBlueflood(test.clientConfig).(*blueflood)
		b.client = fakeHttpClient

		seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
			Metric:       test.queryMetric,
			SampleMethod: test.sampleMethod,
			Timerange:    test.timerange,
			API:          fakeApi,
			Cancellable:  api.NewCancellable(),
		})

		if test.expectedErrorCode != 0 {
			if err == nil {
				a.Errorf("Expected error, but was successful.")
				continue
			}
			berr, ok := err.(api.BackendError)
			if !ok {
				a.Errorf("Failed to cast error to BackendError")
				continue
			}
			a.Eq(berr.Code, test.expectedErrorCode)
		} else {
			if err != nil {
				a.CheckError(err)
				continue
			}
			a.Eq(seriesList, test.expectedSeriesList)
		}
	}
}
Example #9
func TestFullResolutionDataFilling(t *testing.T) {
	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		14400,
	}

	baseTime := 1438734300000

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)
	fullResolutionResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 13
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 16
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 19
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 27
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*6,      // 30m ago
		baseTime-300*1000*5+17,   // 25m ago, plus a small offset
		baseTime-300*1000*4+2821, // 20m ago, plus a small offset
		baseTime-300*1000*3,      // 15m ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})

	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		api.GraphiteMetric("some.key.value"),
	)

	b := NewBlueflood(defaultClientConfig).(*blueflood)
	b.client = fakeHttpClient

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod: api.SampleMean,
		Timerange:    queryTimerange,
		API:          fakeApi,
		Cancellable:  api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}
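	// The first four points come from the MIN5 rollup response; the remaining
	// three are filled in from the FULL-resolution response (its final point
	// falls outside the requested range).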
	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}