Example #1
// fetchManyLazy abstracts over fetchLazy so that callers do not need to loop over the resulting channels themselves.
// It returns a slice of the resulting timeseries, along with any overall error.
func (m *parallelMultiBackend) fetchManyLazy(cancellable api.Cancellable, works []func() (api.Timeseries, error)) ([]api.Timeseries, error) {
	results := make([]api.Timeseries, len(works))
	channel := make(chan error, len(works)) // Buffering the channel means the goroutines won't block when sending their results.
	for i := range results {
		m.fetchLazy(cancellable, &results[i], works[i], channel)
	}
	var err error
	for range works {
		select {
		case thisErr := <-channel:
			if thisErr != nil {
				err = thisErr
			}
		case <-cancellable.Done():
			return nil, api.BackendError{
				api.TaggedMetric{},
				api.FetchTimeoutError,
				"",
			}
		}
	}
	if err != nil {
		return nil, err
	}
	return results, nil
}
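
This version calls m.fetchLazy, whose definition is not shown here. A minimal sketch of what it plausibly looks like, modeled on the Blueflood variant in Example #3 but without the ticket pool; the receiver, signature, and api types come from the call above, while the body is an assumption:

// Hypothetical reconstruction: run the work in a goroutine, store the series,
// and report the error on the shared channel.
func (m *parallelMultiBackend) fetchLazy(cancellable api.Cancellable, result *api.Timeseries, work func() (api.Timeseries, error), channel chan error) {
	go func() {
		series, err := work()
		// Store the result; the caller reads it only after receiving on channel.
		*result = series
		// Report the error and sync up with the caller
		// (the channel is buffered, so this send never blocks).
		channel <- err
	}()
}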
Example #2
// Execute performs the query represented by the given query string and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) {
	timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return nil, err
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast completion, ensuring that any outstanding work is cancelled
	evaluationContext := function.EvaluationContext{
		API:          context.API,
		FetchLimit:   function.NewFetchCounter(context.FetchLimit),
		MultiBackend: context.Backend,
		Predicate:    cmd.predicate,
		SampleMethod: cmd.context.SampleMethod,
		Timerange:    timerange,
		Cancellable:  cancellable,
		Profiler:     context.Profiler,
		Registry:     r,
	}
	if hasTimeout {
		timeout := time.After(context.Timeout)
		// Capacity 1 ensures the goroutine's send succeeds even if the
		// timeout fires first and nobody receives, so it cannot leak.
		results := make(chan interface{}, 1)
		errors := make(chan error, 1)
		go func() {
			result, err := evaluateExpressions(evaluationContext, cmd.expressions)
			if err != nil {
				errors <- err
			} else {
				results <- result
			}
		}()
		select {
		case <-timeout:
			return nil, fmt.Errorf("Timeout while executing the query.")
		case result := <-results:
			return result, nil
		case err := <-errors:
			return nil, err
		}
	} else {
		return evaluateExpressions(evaluationContext, cmd.expressions)
	}
}
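
The timeout branch above is a standard Go pattern: run the evaluation in a goroutine and select on the result, the error, and a timer. The same pattern in isolation, with illustrative names only:

package main

import (
	"errors"
	"fmt"
	"time"
)

// runWithTimeout executes work in a goroutine and waits for at most d.
func runWithTimeout(work func() (string, error), d time.Duration) (string, error) {
	// Capacity 1 lets the goroutine complete its send even if we return early.
	results := make(chan string, 1)
	errs := make(chan error, 1)
	go func() {
		v, err := work()
		if err != nil {
			errs <- err
			return
		}
		results <- v
	}()
	select {
	case <-time.After(d):
		return "", errors.New("timeout while executing the query")
	case err := <-errs:
		return "", err
	case v := <-results:
		return v, nil
	}
}

func main() {
	slow := func() (string, error) { time.Sleep(time.Second); return "done", nil }
	fmt.Println(runWithTimeout(slow, 100*time.Millisecond)) // times out
}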
Example #3
func (b *Blueflood) fetchLazy(cancellable api.Cancellable, result *api.Timeseries, work func() (api.Timeseries, error), channel chan error, ctx BluefloodParallelRequest) {
	go func() {
		select {
		case ticket := <-ctx.tickets:
			series, err := work()
			// Put the ticket back (regardless of whether caller drops)
			ctx.tickets <- ticket
			// Store the result
			*result = series
			// Return the error (and sync up with the caller).
			channel <- err
		case <-cancellable.Done():
			channel <- api.TimeseriesStorageError{
				api.TaggedMetric{},
				api.FetchTimeoutError,
				"",
			}
		}
	}()
}
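
BluefloodParallelRequest is not defined in these examples; from its use here and in Example #4 it is presumably just a holder for the shared ticket pool. A hypothetical reconstruction:

// Hypothetical reconstruction based on the usage above and in Example #4.
type BluefloodParallelRequest struct {
	tickets chan struct{} // counting semaphore; capacity == MaxSimultaneousRequests
}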
Example #4
func (b *Blueflood) fetchManyLazy(cancellable api.Cancellable, works []func() (api.Timeseries, error)) ([]api.Timeseries, error) {
	results := make([]api.Timeseries, len(works))
	channel := make(chan error, len(works)) // Buffering the channel means the goroutines won't block when sending their results.

	limit := b.config.MaxSimultaneousRequests
	tickets := make(chan struct{}, limit)
	for i := 0; i < limit; i++ {
		tickets <- struct{}{}
	}
	ctx := BluefloodParallelRequest{
		tickets: tickets,
	}
	for i := range results {
		b.fetchLazy(cancellable, &results[i], works[i], channel, ctx)
	}

	var err error
	for range works {
		select {
		case thisErr := <-channel:
			if thisErr != nil {
				err = thisErr
			}
		case <-cancellable.Done():
			return nil, api.TimeseriesStorageError{
				api.TaggedMetric{},
				api.FetchTimeoutError,
				"",
			}
		}
	}
	if err != nil {
		return nil, err
	}
	return results, nil
}
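
The ticket channel above is a counting semaphore: its capacity bounds concurrency, and each worker takes a ticket before doing work and puts it back afterwards. The same idiom in isolation (names are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const limit = 3 // analogous to MaxSimultaneousRequests
	tickets := make(chan struct{}, limit)
	for i := 0; i < limit; i++ {
		tickets <- struct{}{} // fill the pool
	}

	var wg sync.WaitGroup
	for job := 0; job < 10; job++ {
		wg.Add(1)
		go func(job int) {
			defer wg.Done()
			ticket := <-tickets // acquire: blocks while limit workers are busy
			fmt.Println("running job", job)
			tickets <- ticket // release
		}(job)
	}
	wg.Wait()
}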
Example #5
// Execute performs the query represented by the given query string and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) {
	timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return nil, err
	}
	slotLimit := context.SlotLimit
	if slotLimit == 0 {
		slotLimit = 1000 // default limit
	}

	// Choose the smallest resolution that keeps the snapped range within the slot limit
	// (assumes slotLimit > 2):
	// ((end + res/2) - (start - res/2)) / res + 1 <= slots   (adjusting for a snap that moves the endpoints)
	// (end - start + res) / res + 1 <= slots
	// (end - start + res) + res <= slots * res
	// end - start <= res * (slots - 2)
	// so res >= (end - start) / (slots - 2)
	smallestResolution := timerange.Duration() / time.Duration(slotLimit-2)
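	// For example (illustrative numbers only): a 1-hour range with the default
	// slotLimit of 1000 gives smallestResolution = 3600s / 998 ≈ 3.6s, so any
	// storage resolution at least that coarse keeps the range within the limit.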

	// Update the timerange by applying the insights of the storage API:
	chosenResolution := context.TimeseriesStorageAPI.ChooseResolution(timerange, smallestResolution)

	chosenTimerange, err := api.NewSnappedTimerange(timerange.Start(), timerange.End(), int64(chosenResolution/time.Millisecond))
	if err != nil {
		return nil, err
	}

	if chosenTimerange.Slots() > slotLimit {
		return nil, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			chosenTimerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast completion, ensuring that any outstanding work is cancelled
	evaluationContext := function.EvaluationContext{
		MetricMetadataAPI:         context.MetricMetadataAPI,
		FetchLimit:                function.NewFetchCounter(context.FetchLimit),
		TimeseriesStorageAPI:      context.TimeseriesStorageAPI,
		Predicate:                 cmd.predicate,
		SampleMethod:              cmd.context.SampleMethod,
		Timerange:                 timerange,
		Cancellable:               cancellable,
		Registry:                  r,
		Profiler:                  context.Profiler,
		OptimizationConfiguration: context.OptimizationConfiguration,
	}

	if hasTimeout {
		timeout := time.After(context.Timeout)
		// Capacity 1 ensures the goroutine's send succeeds even if the
		// timeout fires first and nobody receives, so it cannot leak.
		results := make(chan interface{}, 1)
		errors := make(chan error, 1)
		go func() {
			result, err := function.EvaluateMany(evaluationContext, cmd.expressions)
			if err != nil {
				errors <- err
			} else {
				results <- result
			}
		}()
		select {
		case <-timeout:
			return nil, function.NewLimitError("Timeout while executing the query.",
				context.Timeout, context.Timeout)
		case result := <-results:
			return result, nil
		case err := <-errors:
			return nil, err
		}
	} else {
		values, err := function.EvaluateMany(evaluationContext, cmd.expressions)
		if err != nil {
			return nil, err
		}
		lists := make([]api.SeriesList, len(values))
		for i := range values {
			lists[i], err = values[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return nil, err
			}
		}
		return lists, nil
	}
}
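
ChooseResolution lets the storage backend round the requested resolution up to one it can actually serve. Its implementation is not shown in these examples; a plausible sketch, assuming a fixed ladder of supported rollup resolutions (the ladder and the standalone name chooseResolution are assumptions, not the real API):

// Hypothetical sketch: pick the smallest supported resolution that is
// at least as coarse as the requested one.
func chooseResolution(requested time.Duration) time.Duration {
	supported := []time.Duration{ // assumed rollup ladder
		30 * time.Second, 5 * time.Minute, 20 * time.Minute,
		time.Hour, 4 * time.Hour, 24 * time.Hour,
	}
	for _, r := range supported {
		if r >= requested {
			return r
		}
	}
	return supported[len(supported)-1] // fall back to the coarsest available
}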
Example #6
// Execute performs the query represented by the given query string and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (interface{}, error) {
	timerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return nil, err
	}
	slotLimit := context.SlotLimit
	if slotLimit == 0 {
		slotLimit = 1000 // default limit
	}
	if timerange.Slots() > slotLimit {
		return nil, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			timerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast completion, ensuring that any outstanding work is cancelled
	evaluationContext := function.EvaluationContext{
		API:          context.API,
		FetchLimit:   function.NewFetchCounter(context.FetchLimit),
		MultiBackend: context.Backend,
		Predicate:    cmd.predicate,
		SampleMethod: cmd.context.SampleMethod,
		Timerange:    timerange,
		Cancellable:  cancellable,
		Profiler:     context.Profiler,
		Registry:     r,
	}
	if hasTimeout {
		timeout := time.After(context.Timeout)
		// Capacity 1 ensures the goroutine's send succeeds even if the
		// timeout fires first and nobody receives, so it cannot leak.
		results := make(chan interface{}, 1)
		errors := make(chan error, 1)
		go func() {
			result, err := function.EvaluateMany(evaluationContext, cmd.expressions)
			if err != nil {
				errors <- err
			} else {
				results <- result
			}
		}()
		select {
		case <-timeout:
			return nil, function.NewLimitError("Timeout while executing the query.",
				context.Timeout, context.Timeout)
		case result := <-results:
			return result, nil
		case err := <-errors:
			return nil, err
		}
	} else {
		values, err := function.EvaluateMany(evaluationContext, cmd.expressions)
		if err != nil {
			return nil, err
		}
		lists := make([]api.SeriesList, len(values))
		for i := range values {
			lists[i], err = values[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return nil, err
			}
		}
		return lists, nil
	}
}
Example #7
// Execute performs the query represented by the given query string and returns the result.
func (cmd *SelectCommand) Execute(context ExecutionContext) (CommandResult, error) {
	userTimerange, err := api.NewSnappedTimerange(cmd.context.Start, cmd.context.End, cmd.context.Resolution)
	if err != nil {
		return CommandResult{}, err
	}
	slotLimit := context.SlotLimit
	if slotLimit == 0 {
		slotLimit = 1000 // default limit
	}

	// Choose the smallest resolution that keeps the snapped range within the slot limit
	// (assumes slotLimit > 2):
	// ((end + res/2) - (start - res/2)) / res + 1 <= slots   (adjusting for a snap that moves the endpoints)
	// (end - start + res) / res + 1 <= slots
	// (end - start + res) + res <= slots * res
	// end - start <= res * (slots - 2)
	// so res >= (end - start) / (slots - 2)
	smallestResolution := userTimerange.Duration() / time.Duration(slotLimit-2)

	// Update the timerange by applying the insights of the storage API:
	chosenResolution := context.TimeseriesStorageAPI.ChooseResolution(userTimerange, smallestResolution)

	chosenTimerange, err := api.NewSnappedTimerange(userTimerange.Start(), userTimerange.End(), int64(chosenResolution/time.Millisecond))
	if err != nil {
		return CommandResult{}, err
	}

	if chosenTimerange.Slots() > slotLimit {
		return CommandResult{}, function.NewLimitError(
			"Requested number of data points exceeds the configured limit",
			chosenTimerange.Slots(), slotLimit)
	}
	hasTimeout := context.Timeout != 0
	var cancellable api.Cancellable
	if hasTimeout {
		cancellable = api.NewTimeoutCancellable(time.Now().Add(context.Timeout))
	} else {
		cancellable = api.NewCancellable()
	}
	r := context.Registry
	if r == nil {
		r = registry.Default()
	}

	defer close(cancellable.Done()) // broadcast completion, ensuring that any outstanding work is cancelled
	evaluationContext := function.EvaluationContext{
		MetricMetadataAPI:         context.MetricMetadataAPI,
		FetchLimit:                function.NewFetchCounter(context.FetchLimit),
		TimeseriesStorageAPI:      context.TimeseriesStorageAPI,
		Predicate:                 cmd.predicate,
		SampleMethod:              cmd.context.SampleMethod,
		Timerange:                 chosenTimerange,
		Cancellable:               cancellable,
		Registry:                  r,
		Profiler:                  context.Profiler,
		OptimizationConfiguration: context.OptimizationConfiguration,
		EvaluationNotes:           []string{},
		UserSpecifiableConfig:     context.UserSpecifiableConfig,
	}

	// A nil channel blocks forever, so without a timeout the corresponding
	// select case below can never fire.
	var timeout <-chan time.Time
	if hasTimeout {
		timeout = time.After(context.Timeout)
	}

	// Goroutines are never garbage collected, so give each channel capacity 1;
	// then the send always succeeds even if this function has already returned.
	results := make(chan []function.Value, 1)
	errors := make(chan error, 1)
	go func() {
		// Evaluate the expressions and send the result (or the error) back to the caller.
		result, err := function.EvaluateMany(&evaluationContext, cmd.expressions)
		if err != nil {
			errors <- err
			return
		}
		results <- result
	}()
	select {
	case <-timeout:
		return CommandResult{}, function.NewLimitError("Timeout while executing the query.",
			context.Timeout, context.Timeout)
	case err := <-errors:
		return CommandResult{}, err
	case result := <-results:
		lists := make([]api.SeriesList, len(result))
		for i := range result {
			lists[i], err = result[i].ToSeriesList(evaluationContext.Timerange)
			if err != nil {
				return CommandResult{}, err
			}
		}
		description := map[string][]string{}
		for _, list := range lists {
			for _, series := range list.Series {
				for key, value := range series.TagSet {
					description[key] = append(description[key], value)
				}
			}
		}
		for key, values := range description {
			natural_sort.Sort(values)
			filtered := []string{}
			for i := range values {
				if i == 0 || values[i-1] != values[i] {
					filtered = append(filtered, values[i])
				}
			}
			description[key] = filtered
		}
		return CommandResult{
			Body: lists,
			Metadata: map[string]interface{}{
				"description": description,
				"notes":       evaluationContext.EvaluationNotes,
			},
		}, nil
	}
}
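
The description-building step above sorts each tag's values and then drops adjacent duplicates. That dedupe idiom, extracted into a standalone helper for clarity (the helper name is illustrative; the loop mirrors the code above):

// dedupeSorted removes adjacent duplicates from a sorted slice.
func dedupeSorted(values []string) []string {
	filtered := []string{}
	for i, v := range values {
		if i == 0 || values[i-1] != v {
			filtered = append(filtered, v)
		}
	}
	return filtered
}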