Code Example #1
File: query_test.go  Project: Hellblazer/cockroach
// assertQuery generates a query result from the local test model and compares
// it against the query returned from the server.
func (tm *testModel) assertQuery(name string, agg *proto.TimeSeriesQueryAggregator,
	r Resolution, start, end int64, expectedDatapointCount int, expectedSourceCount int) {
	// Query the actual server.
	q := proto.TimeSeriesQueryRequest_Query{
		Name:       name,
		Aggregator: agg,
	}
	actualDatapoints, actualSources, err := tm.DB.Query(q, r, start, end)
	if err != nil {
		tm.t.Fatal(err)
	}
	if a, e := len(actualDatapoints), expectedDatapointCount; a != e {
		tm.t.Fatalf("query expected %d datapoints, got %d", e, a)
	}
	if a, e := len(actualSources), expectedSourceCount; a != e {
		tm.t.Fatalf("query expected %d sources, got %d", e, a)
	}

	// Construct an expected result for comparison.
	var expectedDatapoints []*proto.TimeSeriesDatapoint
	expectedSources := make([]string, 0)
	dataSpans := make(map[string]*dataSpan)

	// Iterate over all possible sources which may have data for this query.
	for sourceName := range tm.seenSources {
		// Iterate over all possible key times at which query data may be present.
		for time := start - (start % r.KeyDuration()); time < end; time += r.KeyDuration() {
			// Construct a key for this source/time and retrieve it from model.
			key := MakeDataKey(name, sourceName, r, time)
			value := tm.modelData[string(key)]
			if value == nil {
				continue
			}

			// Add data from the key to the correct dataSpan.
			data, err := proto.InternalTimeSeriesDataFromValue(value)
			if err != nil {
				tm.t.Fatal(err)
			}
			ds, ok := dataSpans[sourceName]
			if !ok {
				ds = &dataSpan{
					startNanos:  start - (start % r.SampleDuration()),
					sampleNanos: r.SampleDuration(),
				}
				dataSpans[sourceName] = ds
				expectedSources = append(expectedSources, sourceName)
			}
			if err := ds.addData(data); err != nil {
				tm.t.Fatal(err)
			}
		}
	}

	// Iterate over data in all dataSpans and construct expected datapoints.
	var iters unionIterator
	for _, ds := range dataSpans {
		iters = append(iters, ds.newIterator())
	}
	iters.init()
	for iters.isValid() {
		var value float64
		switch q.GetAggregator() {
		case proto.TimeSeriesQueryAggregator_AVG:
			value = iters.avg()
		case proto.TimeSeriesQueryAggregator_AVG_RATE:
			value = iters.dAvg()
		}
		expectedDatapoints = append(expectedDatapoints, &proto.TimeSeriesDatapoint{
			TimestampNanos: iters.timestamp(),
			Value:          value,
		})
		iters.advance()
	}

	sort.Strings(expectedSources)
	sort.Strings(actualSources)
	if !reflect.DeepEqual(actualSources, expectedSources) {
		tm.t.Errorf("actual source list: %v, expected: %v", actualSources, expectedSources)
	}
	if !reflect.DeepEqual(actualDatapoints, expectedDatapoints) {
		tm.t.Errorf("actual datapoints: %v, expected: %v", actualDatapoints, expectedDatapoints)
	}
}
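
A minimal usage sketch of the helper above, assuming a hypothetical newTestModel constructor, Start/Stop lifecycle helpers, and a test resolution constant named resolution1ns; the metric name and the expected datapoint/source counts are illustrative placeholders, not taken from the project.

func TestAssertQueryExample(t *testing.T) {
	tm := newTestModel(t) // assumed test-model constructor
	tm.Start()            // assumed setup helper
	defer tm.Stop()       // assumed teardown helper

	// Enum aggregators are passed by pointer in the query proto.
	agg := proto.TimeSeriesQueryAggregator_AVG

	// Query "test.metric" between 0ns and 60ns at the assumed test
	// resolution, expecting 7 datapoints aggregated from 2 sources.
	// All of these literal values are placeholders.
	tm.assertQuery("test.metric", &agg, resolution1ns, 0, 60, 7, 2)
}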
Code Example #2
File: query.go  Project: Hellblazer/cockroach
// Query returns datapoints for the named time series during the supplied time
// span.  Data is returned as a series of consecutive data points.
//
// Data is queried only at the Resolution supplied: if data for the named time
// series is not stored at the given resolution, an empty result will be
// returned.
//
// All data stored on the server is downsampled to some degree; the data points
// returned represent the average value within a sample period. Each datapoint's
// timestamp falls in the middle of the sample period it represents.
//
// If data for the named time series was collected from multiple sources, each
// returned datapoint will represent the sum of datapoints from all sources at
// the same time. The returned string slice contains a list of all sources for
// the metric which were aggregated to produce the result.
func (db *DB) Query(query proto.TimeSeriesQueryRequest_Query, r Resolution,
	startNanos, endNanos int64) ([]*proto.TimeSeriesDatapoint, []string, error) {
	// Normalize startNanos to the nearest SampleDuration boundary.
	startNanos -= startNanos % r.SampleDuration()

	// Based on the supplied timestamps and resolution, construct start and end
	// keys for a scan that will return every key with data relevant to the
	// query.
	startKey := MakeDataKey(query.Name, "" /* source */, r, startNanos)
	endKey := MakeDataKey(query.Name, "" /* source */, r, endNanos).PrefixEnd()
	rows, err := db.db.Scan(startKey, endKey, 0)
	if err != nil {
		return nil, nil, err
	}

	// Construct a new dataSpan for each distinct source encountered in the
	// query. Each dataspan will contain all data queried from the same source.
	sourceSpans := make(map[string]*dataSpan)
	for _, row := range rows {
		data := &proto.InternalTimeSeriesData{}
		if err := row.ValueProto(data); err != nil {
			return nil, nil, err
		}

		_, source, _, _ := DecodeDataKey(row.Key)
		if _, ok := sourceSpans[source]; !ok {
			sourceSpans[source] = &dataSpan{
				startNanos:  startNanos,
				sampleNanos: data.SampleDurationNanos,
				datas:       make([]calibratedData, 0, 1),
			}
		}
		if err := sourceSpans[source].addData(data); err != nil {
			return nil, nil, err
		}
	}

	var responseData []*proto.TimeSeriesDatapoint
	sources := make([]string, 0, len(sourceSpans))

	// Create an interpolatingIterator for each dataSpan.
	iters := make(unionIterator, 0, len(sourceSpans))
	for name, span := range sourceSpans {
		sources = append(sources, name)
		iters = append(iters, span.newIterator())
	}

	// Iterate through all values in the iteratorSet, adding a datapoint to
	// the response for each value.
	var valueFn func() float64
	switch query.GetAggregator() {
	case proto.TimeSeriesQueryAggregator_AVG:
		valueFn = iters.avg
	case proto.TimeSeriesQueryAggregator_AVG_RATE:
		valueFn = iters.dAvg
	}

	iters.init()
	for iters.isValid() && iters.timestamp() <= endNanos {
		responseData = append(responseData, &proto.TimeSeriesDatapoint{
			TimestampNanos: iters.timestamp(),
			Value:          valueFn(),
		})
		iters.advance()
	}

	return responseData, sources, nil
}
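
And a hedged sketch of calling Query directly from other server-side code; the queryAverage helper, the metric name, the Resolution10s constant, and the use of fmt for output are assumptions for illustration only.

// queryAverage is a hypothetical caller of (*DB).Query.
func queryAverage(db *DB, startNanos, endNanos int64) error {
	agg := proto.TimeSeriesQueryAggregator_AVG
	q := proto.TimeSeriesQueryRequest_Query{
		Name:       "cr.node.sys.allocbytes", // placeholder metric name
		Aggregator: &agg,
	}
	datapoints, sources, err := db.Query(q, Resolution10s, startNanos, endNanos)
	if err != nil {
		return err
	}
	fmt.Printf("queried %d datapoints aggregated from sources %v\n", len(datapoints), sources)
	return nil
}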