Example 1
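// Test_MetricName_GetTagSet_DB checks that AddMetricName and RemoveMetricName keep the tag sets
// returned by GetTagSet consistent as metrics are added to and removed from the Cassandra-backed database.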
func Test_MetricName_GetTagSet_DB(t *testing.T) {
	a := assert.New(t)
	db := newDatabase(t)
	if db == nil {
		return
	}
	defer cleanDatabase(t, db)
	if _, err := db.GetTagSet("sample"); err == nil {
		t.Errorf("Cassandra should error on fetching nonexistent metric")
	}

	metricNamesTests := []struct {
		addTest      bool
		metricName   string
		tagString    string
		expectedTags map[string][]string // { metricName: [ tags ] }
	}{
		{true, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar1"},
		}},
		{true, "sample", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{true, "sample2", "foo=bar2", map[string][]string{
			"sample":  []string{"foo=bar1", "foo=bar2"},
			"sample2": []string{"foo=bar2"},
		}},
		{false, "sample2", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{false, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar2"},
		}},
	}

	for _, c := range metricNamesTests {
		if c.addTest {
			a.CheckError(db.AddMetricName(api.MetricKey(c.metricName), api.ParseTagSet(c.tagString)))
		} else {
			a.CheckError(db.RemoveMetricName(api.MetricKey(c.metricName), api.ParseTagSet(c.tagString)))
		}

		for k, v := range c.expectedTags {
			if tags, err := db.GetTagSet(api.MetricKey(k)); err != nil {
				t.Errorf("Error fetching tags")
			} else {
				stringTags := make([]string, len(tags))
				for i, tag := range tags {
					stringTags[i] = tag.Serialize()
				}

				a.EqInt(len(stringTags), len(v))
				sort.Strings(stringTags)
				sort.Strings(v)
				a.Eq(stringTags, v)
			}
		}
	}
}
Example 2
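// Evaluate merges the expression's predicate with the context's predicate, looks up all tag sets for the
// metric, filters them through the combined predicate, charges the result count against the fetch limit,
// and then fetches the matching series from the multi-backend.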
func (expr *metricFetchExpression) Evaluate(context function.EvaluationContext) (function.Value, error) {
	// Merge predicates appropriately
	var predicate api.Predicate
	if context.Predicate == nil && expr.predicate == nil {
		predicate = api.TruePredicate
	} else if context.Predicate == nil {
		predicate = expr.predicate
	} else if expr.predicate == nil {
		predicate = context.Predicate
	} else {
		predicate = &andPredicate{[]api.Predicate{expr.predicate, context.Predicate}}
	}

	metricTagSets, err := context.API.GetAllTags(api.MetricKey(expr.metricName))
	if err != nil {
		return nil, err
	}
	filtered := applyPredicates(metricTagSets, predicate)

	ok := context.FetchLimit.Consume(len(filtered))

	if !ok {
		return nil, function.NewLimitError("fetch limit exceeded: too many series to fetch",
			context.FetchLimit.Current(),
			context.FetchLimit.Limit())
	}

	metrics := make([]api.TaggedMetric, len(filtered))
	for i := range metrics {
		metrics[i] = api.TaggedMetric{api.MetricKey(expr.metricName), filtered[i]}
	}

	serieslist, err := context.MultiBackend.FetchMultipleSeries(
		api.FetchMultipleRequest{
			metrics,
			context.SampleMethod,
			context.Timerange,
			context.API,
			context.Cancellable,
			context.Profiler,
		},
	)

	if err != nil {
		return nil, err
	}

	serieslist.Name = expr.metricName

	return serieslist, nil
}
Example 3
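// report prints the total, matched, and unmatched entry counts, followed by a per-rule table of match
// and reverse-conversion statistics sorted by metric key.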
func report(stat Statistics) {
	total := stat.matched + stat.unmatched
	fmt.Printf("Processed %d entries\n", total)
	fmt.Printf("Matched:   %d\n", stat.matched)
	fmt.Printf("Unmatched: %d\n", stat.unmatched)
	fmt.Printf("Per-rule statistics\n")
	rowformat := "%-60s %7d %7d %7d %7d\n"
	headformat := "%-60s %7s %7s %7s %7s\n"
	fmt.Printf(headformat, "name", "match", "rev-suc", "rev-err", "rev-fail")
	sortedKeys := make([]string, len(stat.perMetric))
	index := 0
	for key := range stat.perMetric {
		sortedKeys[index] = string(key)
		index++
	}
	sort.Strings(sortedKeys)
	for _, key := range sortedKeys {
		perMetric := stat.perMetric[api.MetricKey(key)]
		fmt.Printf(rowformat,
			string(key),
			perMetric.matched,
			perMetric.reverseSuccess,
			perMetric.reverseError,
			perMetric.reverseIncorrect,
		)
	}
}
Example 4
// MatchRule checks whether a given graphite string matches the rule and, if so, returns the generated tagged metric.
func (rule Rule) MatchRule(input string) (api.TaggedMetric, bool) {
	tagSet := extractTagValues(rule.graphitePatternRegex, rule.graphitePatternTags, input)
	if tagSet == nil {
		return api.TaggedMetric{}, false
	}
	interpolatedKey, err := interpolateTags(rule.raw.MetricKeyPattern, tagSet, false)
	if err != nil {
		return api.TaggedMetric{}, false
	}
	// Do not output tags appearing in both graphite metric & metric key.
	// For example, if the graphite metric is
	//   `foo.%a%.%b%`
	// and the metric key is
	//   `bar.%b%`
	// then the resulting tag set should only contain {a} after matching,
	// because %b% is already encoded in the key.
	for _, metricKeyTag := range rule.metricKeyTags {
		if _, containsKey := tagSet[metricKeyTag]; containsKey {
			delete(tagSet, metricKeyTag)
		}
	}
	return api.TaggedMetric{
		api.MetricKey(interpolatedKey),
		tagSet,
	}, true
}
Example 5
// MatchRule checks whether a given graphite string matches the rule and, if so, returns the generated tagged metric.
func (rule *Rule) MatchRule(input string) (api.TaggedMetric, bool) {
	if strings.Contains(input, "\x00") {
		log.Errorf("MatchRule (graphite string => metric name) has been given bad metric: `%s`", input)
	}
	tagSet := extractTagValues(rule.graphitePatternRegex, rule.graphitePatternTags, input)
	if tagSet == nil {
		return api.TaggedMetric{}, false
	}
	interpolatedKey, err := interpolateTags(rule.raw.MetricKeyPattern, tagSet, false)
	if err != nil {
		return api.TaggedMetric{}, false
	}
	// Do not output tags appearing in both graphite metric & metric key.
	// For example, if the graphite metric is
	//   `foo.%a%.%b%`
	// and the metric key is
	//   `bar.%b%`
	// then the resulting tag set should only contain {a} after matching,
	// because %b% is already encoded in the key.
	for _, metricKeyTag := range rule.metricKeyTags {
		if _, containsKey := tagSet[metricKeyTag]; containsKey {
			delete(tagSet, metricKeyTag)
		}
	}
	rule.AddMatch(input)
	if strings.Contains(interpolatedKey, "\x00") {
		log.Errorf("MatchRule (graphite string => metric name) is returning bad metric: `%s` from input `%s`", interpolatedKey, input)
	}
	return api.TaggedMetric{
		api.MetricKey(interpolatedKey),
		tagSet,
	}, true
}
Example 6
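// GetAllMetrics returns the key of every metric registered in the fake API's tag-set map.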
func (a fakeAPI) GetAllMetrics() ([]api.MetricKey, error) {
	list := []api.MetricKey{}
	for metric := range a.tagSets {
		list = append(list, api.MetricKey(metric))
	}
	return list, nil
}
Example 7
// AddMetricsForTag adds a metric to the Key/Value set list.
func (fa *FakeMetricMetadataAPI) AddMetricsForTag(key string, value string, metric string) {
	pair := struct {
		key   string
		value string
	}{key, value}
	// If the slice was previously nil, it will be expanded.
	fa.metricsForTags[pair] = append(fa.metricsForTags[pair], api.MetricKey(metric))
}
Example 8
// Modification Operations
// =======================
// These operations are used by the embedded code snippets in language.peg
func (p *Parser) makeDescribe() {
	predicateNode, ok := p.popNode(predicateType).(api.Predicate)
	if !ok {
		p.flagTypeAssertion()
		return
	}
	literal := p.popStringLiteral()
	p.command = &DescribeCommand{
		metricName: api.MetricKey(literal),
		predicate:  predicateNode,
	}
}
Example 9
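// GetMetricsForTag returns every metric key that has at least one tag set containing the given tag key/value pair.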
func (a fakeAPI) GetMetricsForTag(tagKey, tagValue string) ([]api.MetricKey, error) {
	list := []api.MetricKey{}
MetricLoop:
	for metric, tagsets := range a.tagSets {
		for _, tagset := range tagsets {
			for key, val := range tagset {
				if key == tagKey && val == tagValue {
					list = append(list, api.MetricKey(metric))
					continue MetricLoop
				}
			}
		}
	}
	return list, nil
}
Example 10
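// GetMetricsForTag returns every metric key whose tag sets contain the given key/value pair,
// recording the lookup in the context's profiler.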
func (fa *FakeMetricMetadataAPI) GetMetricsForTag(tagKey, tagValue string, context api.MetricMetadataAPIContext) ([]api.MetricKey, error) {
	defer context.Profiler.Record("Mock GetMetricsForTag")()
	list := []api.MetricKey{}
MetricLoop:
	for metric, tagsets := range fa.metricTagSets {
		for _, tagset := range tagsets {
			for key, val := range tagset {
				if key == tagKey && val == tagValue {
					list = append(list, api.MetricKey(metric))
					continue MetricLoop
				}
			}
		}
	}
	return list, nil
}
Example 11
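// TestCaching checks that a second AllTagsCacheHitOrExecute call for the same metric is answered from
// the cache, so the update function is not invoked again.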
func TestCaching(t *testing.T) {
	optimizer := NewOptimizationConfiguration()
	optimizer.EnableMetricMetadataCaching = true

	updateFunc := func() ([]api.TagSet, error) {
		// map[string]string
		result := []api.TagSet{api.NewTagSet()}
		return result, nil
	}
	someMetric := api.MetricKey("blah")
	optimizer.AllTagsCacheHitOrExecute(someMetric, updateFunc)

	updateFunc = func() ([]api.TagSet, error) {
		t.Errorf("Should not be called")
		return nil, nil
	}

	optimizer.AllTagsCacheHitOrExecute(someMetric, updateFunc)
}
Example 12
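// TestCacheExpiration checks that cached tag sets expire: after the optimizer's time source is advanced
// by five hours, the update function is invoked again for the same metric.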
func TestCacheExpiration(t *testing.T) {
	optimizer := NewOptimizationConfiguration()
	optimizer.EnableMetricMetadataCaching = true

	latch := false
	updateFunc := func() ([]api.TagSet, error) {
		// map[string]string
		latch = true
		result := []api.TagSet{api.NewTagSet()}
		return result, nil
	}
	someMetric := api.MetricKey("blah")
	optimizer.AllTagsCacheHitOrExecute(someMetric, updateFunc)
	if !latch {
		t.Errorf("We expected the update function to be called, but it wasn't")
	}
	optimizer.TimeSourceForNow = func() time.Time { return time.Now().Add(5 * time.Hour) }
	latch = false // Reset the latch

	optimizer.AllTagsCacheHitOrExecute(someMetric, updateFunc)
	if !latch {
		t.Errorf("We expected the update function to be called, but it wasn't")
	}
}
Example 13
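// ToTaggedName converts a graphite metric name into a TaggedMetric whose key is the raw name and whose tag set is empty.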
func (a fakeAPI) ToTaggedName(metric api.GraphiteMetric) (api.TaggedMetric, error) {
	return api.TaggedMetric{
		MetricKey: api.MetricKey(metric),
		TagSet:    api.NewTagSet(),
	}, nil
}
Example 14
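// TestFullResolutionDataFilling checks that the tail of the requested range, which is missing from the
// MIN5 response, is filled in from the FULL-resolution response when the two are merged.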
func TestFullResolutionDataFilling(t *testing.T) {
	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		14400,
	}

	baseTime := 1438734300000

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*3,  // 15 minutes ago
	)
	fullResolutionResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 13
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 16
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 19
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 27
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*6,      // 30m ago
		baseTime-300*1000*5+17,   // 25m ago with random shuffling
		baseTime-300*1000*4+2821, // 20m ago with random shuffling
		baseTime-300*1000*3,      // 15m ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})

	fakeApi := mocks.NewFakeApi()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		api.GraphiteMetric("some.key.value"),
	)

	b := NewBlueflood(defaultClientConfig).(*blueflood)
	b.client = fakeHttpClient

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod: api.SampleMean,
		Timerange:    queryTimerange,
		API:          fakeApi,
		Cancellable:  api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}
	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}
Example 15
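// Test_Blueflood runs table-driven cases against FetchSingleSeries, covering a successful fetch as well
// as invalid-JSON, HTTP-error, and timeout failures.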
func Test_Blueflood(t *testing.T) {
	timerange, err := api.NewTimerange(12000, 13000, 1000)
	if err != nil {
		t.Fatalf("invalid testcase timerange")
		return
	}
	defaultClientConfig := Config{
		"https://blueflood.url",
		"square",
		make(map[string]int64),
		time.Millisecond,
		0,
	}
	// Not really MIN1440, but that's what default TTLs will get with the Timerange we use
	defaultQueryUrl := "https://blueflood.url/v2.0/square/views/some.key.graphite?from=12000&resolution=MIN1440&select=numPoints%2Caverage&to=14000"

	for _, test := range []struct {
		name               string
		metricMap          map[api.GraphiteMetric]api.TaggedMetric
		queryMetric        api.TaggedMetric
		sampleMethod       api.SampleMethod
		timerange          api.Timerange
		clientConfig       Config
		queryUrl           string
		queryResponse      string
		queryResponseCode  int
		queryDelay         time.Duration
		expectedErrorCode  api.BackendErrorCode
		expectedSeriesList api.Timeseries
	}{
		{
			name: "Success case",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod: api.SampleMean,
			timerange:    timerange,
			queryUrl:     defaultQueryUrl,
			clientConfig: defaultClientConfig,
			queryResponse: `{
        "unit": "unknown", 
        "values": [
          {
            "numPoints": 1,
            "timestamp": 12000,
            "average": 5
          },
          {
            "numPoints": 1,
            "timestamp": 13000,
            "average": 3
          }
        ],
        "metadata": {
          "limit": null,
          "next_href": null,
          "count": 2,
          "marker": null
        }
      }`,
			expectedSeriesList: api.Timeseries{
				Values: []float64{5, 3},
				TagSet: api.ParseTagSet("tag=value"),
			},
		},
		{
			name: "Failure case - invalid JSON",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{invalid}`,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - HTTP error",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryResponseCode: 400,
			expectedErrorCode: api.FetchIOError,
		},
		{
			name: "Failure case - timeout",
			metricMap: map[api.GraphiteMetric]api.TaggedMetric{
				api.GraphiteMetric("some.key.graphite"): api.TaggedMetric{
					MetricKey: api.MetricKey("some.key"),
					TagSet:    api.ParseTagSet("tag=value"),
				},
			},
			queryMetric: api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
			sampleMethod:      api.SampleMean,
			timerange:         timerange,
			clientConfig:      defaultClientConfig,
			queryUrl:          defaultQueryUrl,
			queryResponse:     `{}`,
			queryDelay:        1 * time.Second,
			expectedErrorCode: api.FetchTimeoutError,
		},
	} {
		a := assert.New(t).Contextf("%s", test.name)

		fakeApi := mocks.NewFakeApi()
		for k, v := range test.metricMap {
			fakeApi.AddPair(v, k)
		}

		fakeHttpClient := mocks.NewFakeHttpClient()
		code := test.queryResponseCode
		if code == 0 {
			code = http.StatusOK
		}
		fakeHttpClient.SetResponse(test.queryUrl, mocks.Response{test.queryResponse, test.queryDelay, code})

		b := NewBlueflood(test.clientConfig).(*blueflood)
		b.client = fakeHttpClient

		seriesList, err := b.FetchSingleSeries(api.FetchSeriesRequest{
			Metric:       test.queryMetric,
			SampleMethod: test.sampleMethod,
			Timerange:    test.timerange,
			API:          fakeApi,
			Cancellable:  api.NewCancellable(),
		})

		if test.expectedErrorCode != 0 {
			if err == nil {
				a.Errorf("Expected error, but was successful.")
				continue
			}
			berr, ok := err.(api.BackendError)
			if !ok {
				a.Errorf("Failed to cast error to BackendError")
				continue
			}
			a.Eq(berr.Code, test.expectedErrorCode)
		} else {
			if err != nil {
				a.CheckError(err)
				continue
			}
			a.Eq(seriesList, test.expectedSeriesList)
		}
	}
}
Example 16
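// Test_MetricName_GetTagSet_API exercises AddMetric and RemoveMetric against the Cassandra API and
// checks that GetAllTags reflects each addition and removal.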
func Test_MetricName_GetTagSet_API(t *testing.T) {
	a := assert.New(t)
	cassandra, context := newCassandraAPI(t)
	defer cleanAPI(t, cassandra)

	if _, err := cassandra.GetAllTags("sample", context); err == nil {
		t.Errorf("Cassandra API should error on fetching nonexistent metric")
	}

	metricNamesTests := []struct {
		addTest      bool
		metricName   string
		tagString    string
		expectedTags map[string][]string // { metricName: [ tags ] }
	}{
		{true, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar1"},
		}},
		{true, "sample", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{true, "sample2", "foo=bar2", map[string][]string{
			"sample":  []string{"foo=bar1", "foo=bar2"},
			"sample2": []string{"foo=bar2"},
		}},
		{false, "sample2", "foo=bar2", map[string][]string{
			"sample": []string{"foo=bar1", "foo=bar2"},
		}},
		{false, "sample", "foo=bar1", map[string][]string{
			"sample": []string{"foo=bar2"},
		}},
	}

	for _, c := range metricNamesTests {
		if c.addTest {
			a.CheckError(cassandra.AddMetric(api.TaggedMetric{
				api.MetricKey(c.metricName),
				api.ParseTagSet(c.tagString),
			}, context))
		} else {
			a.CheckError(cassandra.RemoveMetric(api.TaggedMetric{
				api.MetricKey(c.metricName),
				api.ParseTagSet(c.tagString),
			}, context))
		}

		for k, v := range c.expectedTags {
			if tags, err := cassandra.GetAllTags(api.MetricKey(k), context); err != nil {
				t.Errorf("Error fetching tags")
			} else {
				stringTags := make([]string, len(tags))
				for i, tag := range tags {
					stringTags[i] = tag.Serialize()
				}

				a.EqInt(len(stringTags), len(v))
				sort.Strings(stringTags)
				sort.Strings(v)
				a.Eq(stringTags, v)
			}
		}
	}
}
Example 17
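// TestFullResolutionDataFilling checks that points missing from the MIN5 response are filled in from the
// FULL-resolution response, using a fake graphite converter, metadata API, and HTTP client.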
func TestFullResolutionDataFilling(t *testing.T) {
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.value"): api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}

	fakeApi := mocks.NewFakeMetricMetadataAPI()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		util.GraphiteMetric("some.key.value"),
		&graphite,
	)

	now := time.Unix(1438734300000, 0)

	baseTime := now.Unix() * 1000
	timeSource := func() time.Time { return now }

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)

	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   14400,
		GraphiteMetricConverter: &graphite,
		TimeSource:              timeSource,
	}

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fullResolutionQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=FULL&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)
	fullResolutionResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 13
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 16
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 19
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 27
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*6,      // 30m ago
		baseTime-300*1000*5+17,   // 25m ago with random shuffling
		baseTime-300*1000*4+2821, // 20m ago with random shuffling
		baseTime-300*1000*3,      // 15m ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})
	defaultClientConfig.HttpClient = fakeHttpClient
	defaultClientConfig.TimeSource = timeSource

	b := NewBlueflood(defaultClientConfig)

	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	seriesList, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod:   api.SampleMean,
		Timerange:      queryTimerange,
		MetricMetadata: fakeApi,
		Cancellable:    api.NewCancellable(),
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}
	expected := []float64{100, 142, 138, 182, 13, 16, 19}
	if len(seriesList.Values) != len(expected) {
		t.Fatalf("Expected %+v but got %+v", expected, seriesList)
	}
	for i, expect := range expected {
		if seriesList.Values[i] != expect {
			t.Fatalf("Expected %+v but got %+v", expected, seriesList)
		}
	}
}
Example 18
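// Evaluate merges the expression and context predicates, resolves the metric's tag sets through the
// metadata cache, enforces the fetch limit, and fetches the matching timeseries from the storage API.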
func (expr *metricFetchExpression) Evaluate(context *function.EvaluationContext) (function.Value, error) {
	// Merge predicates appropriately
	var predicate api.Predicate
	if context.Predicate == nil && expr.predicate == nil {
		predicate = api.TruePredicate
	} else if context.Predicate == nil {
		predicate = expr.predicate
	} else if expr.predicate == nil {
		predicate = context.Predicate
	} else {
		predicate = &andPredicate{[]api.Predicate{expr.predicate, context.Predicate}}
	}

	updateFunction := func() ([]api.TagSet, error) {
		metricTagSets, err := context.MetricMetadataAPI.GetAllTags(api.MetricKey(expr.metricName), api.MetricMetadataAPIContext{
			Profiler: context.Profiler,
		})
		if err != nil {
			return nil, err
		}
		return metricTagSets, nil
	}
	metricTagSets, err := context.OptimizationConfiguration.AllTagsCacheHitOrExecute(api.MetricKey(expr.metricName), updateFunction)
	if err != nil {
		return nil, err
	}
	filtered := applyPredicates(metricTagSets, predicate)

	ok := context.FetchLimit.Consume(len(filtered))

	if !ok {
		return nil, function.NewLimitError("fetch limit exceeded: too many series to fetch",
			context.FetchLimit.Current(),
			context.FetchLimit.Limit())
	}

	metrics := make([]api.TaggedMetric, len(filtered))
	for i := range metrics {
		metrics[i] = api.TaggedMetric{api.MetricKey(expr.metricName), filtered[i]}
	}

	serieslist, err := context.TimeseriesStorageAPI.FetchMultipleTimeseries(
		api.FetchMultipleTimeseriesRequest{
			metrics,
			context.SampleMethod,
			context.Timerange,
			context.MetricMetadataAPI,
			context.Cancellable,
			context.Profiler,
			context.UserSpecifiableConfig,
		},
	)

	if err != nil {
		return nil, err
	}

	serieslist.Name = expr.metricName
	serieslist.Query = expr.metricName

	return serieslist, nil
}
Example 19
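// TestIncludeRawPayload checks that when IncludeRawData is set, FetchSingleTimeseries attaches the raw
// Blueflood response body to the returned timeseries.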
func TestIncludeRawPayload(t *testing.T) {
	graphite := mocks.FakeGraphiteConverter{
		MetricMap: map[util.GraphiteMetric]api.TaggedMetric{
			util.GraphiteMetric("some.key.value"): api.TaggedMetric{
				MetricKey: api.MetricKey("some.key"),
				TagSet:    api.ParseTagSet("tag=value"),
			},
		},
	}

	fakeApi := mocks.NewFakeMetricMetadataAPI()
	fakeApi.AddPair(
		api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		util.GraphiteMetric("some.key.value"),
		&graphite,
	)

	now := time.Unix(1438734300000, 0)

	baseTime := now.Unix() * 1000
	timeSource := func() time.Time { return now }

	queryTimerange, err := api.NewSnappedTimerange(
		int64(baseTime)-300*1000*10, // 50 minutes ago
		int64(baseTime)-300*1000*4,  // 20 minutes ago
		300*1000,                    // 5 minute resolution
	)

	// The queries have to be relative to "now"
	defaultClientConfig := Config{
		BaseUrl:                 "https://blueflood.url",
		TenantId:                "square",
		Ttls:                    make(map[string]int64),
		Timeout:                 time.Millisecond,
		FullResolutionOverlap:   14400,
		GraphiteMetricConverter: &graphite,
		TimeSource:              timeSource,
	}

	regularQueryURL := fmt.Sprintf(
		"https://blueflood.url/v2.0/square/views/some.key.value?from=%d&resolution=MIN5&select=numPoints%%2Caverage&to=%d",
		queryTimerange.Start(),
		queryTimerange.End()+queryTimerange.ResolutionMillis(),
	)

	regularResponse := fmt.Sprintf(`{
	  "unit": "unknown",
	  "values": [
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 100
	    },
	    {
	      "numPoints": 29,
	      "timestamp": %d,
	      "average": 142
	    },
	    {
	      "numPoints": 27,
	      "timestamp": %d,
	      "average": 138
	    },
	    {
	      "numPoints": 28,
	      "timestamp": %d,
	      "average": 182
	    }
	  ],
	  "metadata": {
	    "limit": null,
	    "next_href": null,
	    "count": 4,
	    "marker": null
	  }
	}`,
		baseTime-300*1000*10, // 50 minutes ago
		baseTime-300*1000*9,  // 45 minutes ago
		baseTime-300*1000*8,  // 40 minutes ago
		baseTime-300*1000*7,  // 35 minutes ago
	)

	fakeHttpClient := mocks.NewFakeHttpClient()
	fakeHttpClient.SetResponse(regularQueryURL, mocks.Response{regularResponse, 0, http.StatusOK})
	// fakeHttpClient.SetResponse(fullResolutionQueryURL, mocks.Response{fullResolutionResponse, 0, http.StatusOK})
	defaultClientConfig.HttpClient = fakeHttpClient
	defaultClientConfig.TimeSource = timeSource

	b := NewBlueflood(defaultClientConfig)
	if err != nil {
		t.Fatalf("timerange error: %s", err.Error())
	}

	userConfig := api.UserSpecifiableConfig{
		IncludeRawData: true,
	}

	timeSeries, err := b.FetchSingleTimeseries(api.FetchTimeseriesRequest{
		Metric: api.TaggedMetric{
			MetricKey: api.MetricKey("some.key"),
			TagSet:    api.ParseTagSet("tag=value"),
		},
		SampleMethod:          api.SampleMean,
		Timerange:             queryTimerange,
		MetricMetadata:        fakeApi,
		Cancellable:           api.NewCancellable(),
		UserSpecifiableConfig: userConfig,
	})
	if err != nil {
		t.Fatalf("Expected success, but got error: %s", err.Error())
	}

	if timeSeries.Raw == nil || string(timeSeries.Raw[0]) != regularResponse {
		t.Fatalf("Didn't fill in the raw result correctly, got: %s\n", string(timeSeries.Raw[0]))
	}
}