Example No. 1
// createVarRefIterator creates an iterator for a variable reference.
func (e *Engine) createVarRefIterator(opt influxql.IteratorOptions) ([]influxql.Iterator, error) {
	ref, _ := opt.Expr.(*influxql.VarRef)

	var itrs []influxql.Iterator
	if err := func() error {
		mms := tsdb.Measurements(e.index.MeasurementsByName(influxql.Sources(opt.Sources).Names()))

		// Preallocate space for the maximum number of condition fields (excluding time).
		conditionFields := make([]string, len(influxql.ExprNames(opt.Condition)))

		for _, mm := range mms {
			// Determine tagsets for this measurement based on dimensions and filters.
			tagSets, err := mm.TagSets(opt.Dimensions, opt.Condition)
			if err != nil {
				return err
			}

			// Apply SLIMIT/SOFFSET to the tag sets.
			tagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

			for _, t := range tagSets {
				for i, seriesKey := range t.SeriesKeys {
					fields := 0
					if t.Filters[i] != nil {
						// Retrieve non-time fields from this series filter and filter out tags.
						for _, f := range influxql.ExprNames(t.Filters[i]) {
							if mm.HasField(f) {
								conditionFields[fields] = f
								fields++
							}
						}
					}

					itr, err := e.createVarRefSeriesIterator(ref, mm, seriesKey, t, t.Filters[i], conditionFields[:fields], opt)
					if err != nil {
						return err
					} else if itr == nil {
						continue
					}
					itrs = append(itrs, itr)
				}
			}
		}
		return nil
	}(); err != nil {
		influxql.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
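
A pattern worth noting before the remaining examples: each constructor builds its iterators inside an inline func and, if any step fails, closes everything created so far before returning the error. Below is a minimal, self-contained sketch of that idiom; the res type and open constructor are hypothetical stand-ins for influxql.Iterator and the iterator constructors above.

package main

import (
	"errors"
	"fmt"
)

// res stands in for any resource that must be released on failure.
type res struct{ id int }

func (r *res) Close() error { fmt.Println("closed", r.id); return nil }

// open simulates a constructor that can fail partway through a batch.
func open(id int) (*res, error) {
	if id == 3 {
		return nil, errors.New("open failed")
	}
	return &res{id: id}, nil
}

func main() {
	var items []*res
	// Build everything inside an inline func; a single error check then
	// covers an arbitrary number of fallible steps.
	if err := func() error {
		for i := 1; i <= 4; i++ {
			r, err := open(i)
			if err != nil {
				return err
			}
			items = append(items, r)
		}
		return nil
	}(); err != nil {
		// On failure, release whatever was already created so nothing leaks.
		for _, r := range items {
			r.Close()
		}
		fmt.Println("error:", err)
	}
}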
Example No. 2
// ReadAll reads all points from all iterators.
func (itrs Iterators) ReadAll() [][]influxql.Point {
	var a [][]influxql.Point

	// Read from every iterator until a nil is encountered.
	for {
		points := itrs.Next()
		if points == nil {
			break
		}
		a = append(a, points)
	}

	// Close all iterators.
	influxql.Iterators(itrs).Close()

	return a
}
Example No. 3
// createTagKeysIterator returns an iterator for all tag keys across measurements.
func (a Shards) createTagKeysIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) {
	itrs := make([]influxql.Iterator, 0, len(a))
	if err := func() error {
		for _, sh := range a {
			itr, err := NewTagKeysIterator(sh, opt)
			if err != nil {
				return err
			}
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		influxql.Iterators(itrs).Close()
		return nil, err
	}
	return influxql.NewMergeIterator(itrs, opt), nil
}
Example No. 4
// createVarRefIterator creates an iterator for a variable reference.
func (e *Engine) createVarRefIterator(opt influxql.IteratorOptions) ([]influxql.Iterator, error) {
	ref, _ := opt.Expr.(*influxql.VarRef)

	var itrs []influxql.Iterator
	if err := func() error {
		mms := tsdb.Measurements(e.index.MeasurementsByName(influxql.Sources(opt.Sources).Names()))

		for _, mm := range mms {
			// Determine tagsets for this measurement based on dimensions and filters.
			tagSets, err := mm.TagSets(opt.Dimensions, opt.Condition)
			if err != nil {
				return err
			}

			// Apply SLIMIT/SOFFSET to the tag sets.
			tagSets = influxql.LimitTagSets(tagSets, opt.SLimit, opt.SOffset)

			for _, t := range tagSets {
				inputs, err := e.createTagSetIterators(ref, mm, t, opt)
				if err != nil {
					return err
				}

				if len(inputs) > 0 && (opt.Limit > 0 || opt.Offset > 0) {
					var itr influxql.Iterator
					if opt.MergeSorted() {
						itr = influxql.NewSortedMergeIterator(inputs, opt)
					} else {
						itr = influxql.NewMergeIterator(inputs, opt)
					}
					itrs = append(itrs, newLimitIterator(itr, opt))
				} else {
					itrs = append(itrs, inputs...)
				}
			}
		}
		return nil
	}(); err != nil {
		influxql.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
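
Note the ordering above: when LIMIT or OFFSET is set, the per-series inputs for a tag set are first merged into a single (optionally sorted) iterator, so the limit applies to the combined stream rather than to each series independently.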
Example No. 5
// ReadAll reads all points from all iterators.
func (itrs Iterators) ReadAll() ([][]influxql.Point, error) {
	var a [][]influxql.Point

	// Read from every iterator until a nil is encountered.
	for {
		points, err := itrs.Next()
		if err != nil {
			return nil, err
		} else if points == nil {
			break
		}
		a = append(a, influxql.Points(points).Clone())
	}

	// Close all iterators.
	influxql.Iterators(itrs).Close()

	return a, nil
}
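
Compared with Example No. 2, this version propagates errors from Next and clones each batch via influxql.Points(points).Clone() before appending, presumably because the underlying iterators are free to reuse their point buffers between reads.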
Example No. 6
// CreateIterator returns a single combined iterator for the shards.
func (a Shards) CreateIterator(opt influxql.IteratorOptions) (influxql.Iterator, error) {
	if influxql.Sources(opt.Sources).HasSystemSource() {
		return a.createSystemIterator(opt)
	}

	// Create iterators for each shard.
	// Ensure that they are closed if an error occurs.
	itrs := make([]influxql.Iterator, 0, len(a))
	if err := func() error {
		for _, sh := range a {
			itr, err := sh.CreateIterator(opt)
			if err != nil {
				return err
			}
			itrs = append(itrs, itr)
		}
		return nil
	}(); err != nil {
		influxql.Iterators(itrs).Close()
		return nil, err
	}

	// Merge into a single iterator.
	if opt.MergeSorted() {
		return influxql.NewSortedMergeIterator(itrs, opt), nil
	}

	itr := influxql.NewMergeIterator(itrs, opt)
	if opt.Expr != nil {
		// Each shard reports a partial count; merging partials across shards
		// requires summing them, so rewrite "count" to "sum" here.
		if expr, ok := opt.Expr.(*influxql.Call); ok && expr.Name == "count" {
			opt.Expr = &influxql.Call{
				Name: "sum",
				Args: expr.Args,
			}
		}
	}
	return influxql.NewCallIterator(itr, opt), nil
}
Example No. 7
// createTagSetIterators creates a set of iterators for a tagset.
func (e *Engine) createTagSetIterators(ref *influxql.VarRef, mm *tsdb.Measurement, t *influxql.TagSet, opt influxql.IteratorOptions) ([]influxql.Iterator, error) {
	// Guard the empty tag set; a parallelism of zero would panic the division below.
	if len(t.SeriesKeys) == 0 {
		return nil, nil
	}

	// Set parallelism by the number of logical CPUs, capped at the series count.
	parallelism := runtime.GOMAXPROCS(0)
	if parallelism > len(t.SeriesKeys) {
		parallelism = len(t.SeriesKeys)
	}

	// Create series key groupings, each carrying its own results and error.
	groups := make([]struct {
		keys    []string
		filters []influxql.Expr
		itrs    []influxql.Iterator
		err     error
	}, parallelism)

	// Group series keys.
	n := len(t.SeriesKeys) / parallelism
	for i := 0; i < parallelism; i++ {
		group := &groups[i]

		if i < parallelism-1 {
			group.keys = t.SeriesKeys[i*n : (i+1)*n]
			group.filters = t.Filters[i*n : (i+1)*n]
		} else {
			group.keys = t.SeriesKeys[i*n:]
			group.filters = t.Filters[i*n:]
		}

		group.itrs = make([]influxql.Iterator, 0, len(group.keys))
	}

	// Read series groups in parallel.
	var wg sync.WaitGroup
	for i := range groups {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			groups[i].itrs, groups[i].err = e.createTagSetGroupIterators(ref, mm, groups[i].keys, t, groups[i].filters, opt)
		}(i)
	}
	wg.Wait()

	// Determine total number of iterators so we can allocate only once.
	var itrN int
	for _, group := range groups {
		itrN += len(group.itrs)
	}

	// Combine all iterators together and check for errors.
	var err error
	itrs := make([]influxql.Iterator, 0, itrN)
	for _, group := range groups {
		if group.err != nil {
			err = group.err
		}
		itrs = append(itrs, group.itrs...)
	}

	// If an error occurred, make sure we close all created iterators.
	if err != nil {
		influxql.Iterators(itrs).Close()
		return nil, err
	}

	return itrs, nil
}
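
The grouping arithmetic above, contiguous slices of size len/parallelism with the last group absorbing the remainder, one goroutine per group, and results joined after wg.Wait(), is easy to lift out on its own. Here is a minimal sketch with string keys and a trivial per-item operation standing in for createTagSetGroupIterators:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	keys := []string{"a", "b", "c", "d", "e", "f", "g"}

	// Cap parallelism at the number of logical CPUs and at the number of
	// items, mirroring createTagSetIterators above.
	parallelism := runtime.GOMAXPROCS(0)
	if parallelism > len(keys) {
		parallelism = len(keys)
	}

	results := make([][]string, parallelism)
	n := len(keys) / parallelism

	var wg sync.WaitGroup
	for i := 0; i < parallelism; i++ {
		lo, hi := i*n, (i+1)*n
		if i == parallelism-1 {
			hi = len(keys) // the last group takes the remainder
		}
		wg.Add(1)
		go func(i int, part []string) {
			defer wg.Done()
			// Each goroutine writes only to its own slot, so no locking is
			// required to collect per-group results.
			for _, k := range part {
				results[i] = append(results[i], "processed:"+k)
			}
		}(i, keys[lo:hi])
	}
	wg.Wait()

	for i, group := range results {
		fmt.Println("group", i, group)
	}
}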
Example No. 8
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// Handle SHOW TAG VALUES separately so it can be optimized.
	// https://github.com/influxdata/influxdb/issues/6233
	if source, ok := stmt.Sources[0].(*influxql.Measurement); ok && source.Name == "_tags" {
		// Use the optimized version only if we have direct access to the database.
		if store, ok := e.TSDBStore.(LocalTSDBStore); ok {
			return e.executeShowTagValues(stmt, ctx, store)
		}
	}

	// It is important to "stamp" this time so that every evaluation of `now()` in the statement sees EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{InterruptCh: ctx.InterruptCh}

	// Replace instances of "now()" with the current time, and check the resultant times.
	nowValuer := influxql.NowValuer{Now: now}
	stmt.Condition = influxql.Reduce(stmt.Condition, &nowValuer)
	// Replace instances of "now()" with the current time in the dimensions.
	for _, d := range stmt.Dimensions {
		d.Expr = influxql.Reduce(d.Expr, &nowValuer)
	}

	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return err
	}

	if opt.MaxTime.IsZero() {
		// Meta queries give the user no way to specify a time condition, so
		// expand the default max time to the maximum possible value to ensure
		// that data whose points all lie in the future is still returned.
		if influxql.Sources(stmt.Sources).HasSystemSource() {
			opt.MaxTime = time.Unix(0, influxql.MaxTime).UTC()
		} else {
			opt.MaxTime = now
		}
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteFields(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return fmt.Errorf("max select bucket count exceeded: %d buckets", buckets)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool

	var pointsWriter *BufferedPointsWriter
	if stmt.Target != nil {
		pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000)
	}

	for {
		row, err := em.Emit()
		if err != nil {
			return err
		} else if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(pointsWriter, stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Flush remaining points and emit the write count if this is an INTO statement.
	if stmt.Target != nil {
		if err := pointsWriter.Flush(); err != nil {
			return err
		}

		var messages []*influxql.Message
		if ctx.ReadOnly {
			messages = append(messages, influxql.ReadOnlyWarning(stmt.String()))
		}

		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Messages:    messages,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
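
The MaxSelectBucketsN guard above is plain interval arithmetic, and a toy worked example (values invented for illustration) makes the truncation behavior concrete:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Align the query's time range to the GROUP BY interval, then count
	// buckets, as the guard in executeSelectStatement does.
	interval := 10 * time.Minute
	minTime := time.Date(2016, 1, 1, 0, 3, 0, 0, time.UTC)
	maxTime := time.Date(2016, 1, 1, 1, 57, 0, 0, time.UTC)

	min := minTime.Truncate(interval)               // 00:00
	max := maxTime.Truncate(interval).Add(interval) // 02:00

	buckets := int64(max.Sub(min)) / int64(interval)
	fmt.Println(buckets) // 12 buckets across a 2h span
}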
Example No. 9
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// It is important to "stamp" this time so that every evaluation of `now()` in the statement sees EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{InterruptCh: ctx.InterruptCh}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	var err error
	opt.MinTime, opt.MaxTime, err = influxql.TimeRange(stmt.Condition)
	if err != nil {
		return err
	}

	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	if e.MaxSelectBucketsN > 0 && !stmt.IsRawQuery {
		interval, err := stmt.GroupByInterval()
		if err != nil {
			return err
		}

		if interval > 0 {
			// Determine the start and end time matched to the interval (may not match the actual times).
			min := opt.MinTime.Truncate(interval)
			max := opt.MaxTime.Truncate(interval).Add(interval)

			// Determine the number of buckets by finding the time span and dividing by the interval.
			buckets := int64(max.Sub(min)) / int64(interval)
			if int(buckets) > e.MaxSelectBucketsN {
				return fmt.Errorf("max select bucket count exceeded: %d buckets", buckets)
			}
		}
	}

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	if e.MaxSelectPointN > 0 {
		monitor := influxql.PointLimitMonitor(itrs, influxql.DefaultStatsInterval, e.MaxSelectPointN)
		ctx.Query.Monitor(monitor)
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool
	for {
		row := em.Emit()
		if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Emit write count if an INTO statement.
	if stmt.Target != nil {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
Example No. 10
func (e *StatementExecutor) executeSelectStatement(stmt *influxql.SelectStatement, ctx *influxql.ExecutionContext) error {
	// Handle SHOW TAG VALUES separately so it can be optimized.
	// https://github.com/influxdata/influxdb/issues/6233
	if source, ok := stmt.Sources[0].(*influxql.Measurement); ok && source.Name == "_tags" {
		// Use the optimized version only if we have direct access to the database.
		if store, ok := e.TSDBStore.(LocalTSDBStore); ok {
			return e.executeShowTagValues(stmt, ctx, store)
		}
	}

	itrs, stmt, err := e.createIterators(stmt, ctx)
	if err != nil {
		return err
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending(), ctx.ChunkSize)
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool

	var pointsWriter *BufferedPointsWriter
	if stmt.Target != nil {
		pointsWriter = NewBufferedPointsWriter(e.PointsWriter, stmt.Target.Measurement.Database, stmt.Target.Measurement.RetentionPolicy, 10000)
	}

	for {
		row, err := em.Emit()
		if err != nil {
			return err
		} else if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-ctx.InterruptCh:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(pointsWriter, stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		result := &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      []*models.Row{row},
		}

		// Send results or exit if closing.
		select {
		case <-ctx.InterruptCh:
			return influxql.ErrQueryInterrupted
		case ctx.Results <- result:
		}

		emitted = true
	}

	// Flush remaining points and emit the write count if this is an INTO statement.
	if stmt.Target != nil {
		if err := pointsWriter.Flush(); err != nil {
			return err
		}

		var messages []*influxql.Message
		if ctx.ReadOnly {
			messages = append(messages, influxql.ReadOnlyWarning(stmt.String()))
		}

		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Messages:    messages,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		ctx.Results <- &influxql.Result{
			StatementID: ctx.StatementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
Example No. 11
func (e *QueryExecutor) executeSelectStatement(stmt *influxql.SelectStatement, chunkSize, statementID int, results chan *influxql.Result, closing <-chan struct{}) error {
	// It is important to "stamp" this time so that every evaluation of `now()` in the statement sees EXACTLY the same `now`.
	now := time.Now().UTC()
	opt := influxql.SelectOptions{InterruptCh: closing}

	// Replace instances of "now()" with the current time, and check the resultant times.
	stmt.Condition = influxql.Reduce(stmt.Condition, &influxql.NowValuer{Now: now})
	opt.MinTime, opt.MaxTime = influxql.TimeRange(stmt.Condition)
	if opt.MaxTime.IsZero() {
		opt.MaxTime = now
	}
	if opt.MinTime.IsZero() {
		opt.MinTime = time.Unix(0, 0)
	}

	// Convert DISTINCT into a call.
	stmt.RewriteDistinct()

	// Remove "time" from fields list.
	stmt.RewriteTimeFields()

	// Create an iterator creator based on the shards in the cluster.
	ic, err := e.iteratorCreator(stmt, &opt)
	if err != nil {
		return err
	}

	// Expand regex sources to their actual source names.
	if stmt.Sources.HasRegex() {
		sources, err := ic.ExpandSources(stmt.Sources)
		if err != nil {
			return err
		}
		stmt.Sources = sources
	}

	// Rewrite wildcards, if any exist.
	tmp, err := stmt.RewriteWildcards(ic)
	if err != nil {
		return err
	}
	stmt = tmp

	// Create a set of iterators from a selection.
	itrs, err := influxql.Select(stmt, ic, &opt)
	if err != nil {
		return err
	}

	// Generate a row emitter from the iterator set.
	em := influxql.NewEmitter(itrs, stmt.TimeAscending())
	em.Columns = stmt.ColumnNames()
	em.OmitTime = stmt.OmitTime
	defer em.Close()

	// Calculate initial stats across all iterators.
	stats := influxql.Iterators(itrs).Stats()
	if e.MaxSelectSeriesN > 0 && stats.SeriesN > e.MaxSelectSeriesN {
		return fmt.Errorf("max select series count exceeded: %d series", stats.SeriesN)
	}

	// Emit rows to the results channel.
	var writeN int64
	var emitted bool
	for {
		row := em.Emit()
		if row == nil {
			// Check if the query was interrupted while emitting.
			select {
			case <-closing:
				return influxql.ErrQueryInterrupted
			default:
			}
			break
		}

		result := &influxql.Result{
			StatementID: statementID,
			Series:      []*models.Row{row},
		}

		// Write points back into system for INTO statements.
		if stmt.Target != nil {
			if err := e.writeInto(stmt, row); err != nil {
				return err
			}
			writeN += int64(len(row.Values))
			continue
		}

		// Send results or exit if closing.
		select {
		case <-closing:
			return influxql.ErrQueryInterrupted
		case results <- result:
		}

		emitted = true
	}

	// Emit write count if an INTO statement.
	if stmt.Target != nil {
		results <- &influxql.Result{
			StatementID: statementID,
			Series: []*models.Row{{
				Name:    "result",
				Columns: []string{"time", "written"},
				Values:  [][]interface{}{{time.Unix(0, 0).UTC(), writeN}},
			}},
		}
		return nil
	}

	// Always emit at least one result.
	if !emitted {
		results <- &influxql.Result{
			StatementID: statementID,
			Series:      make([]*models.Row, 0),
		}
	}

	return nil
}
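
Read together, Examples No. 8-11 trace the evolution of the same executor method: the oldest variant (No. 11) takes raw chunkSize/statementID/results-channel arguments, a two-value TimeRange, and an Emit that cannot fail; No. 9 moves to an ExecutionContext and an error-returning TimeRange; and Nos. 8 and 10 additionally gain the _tags fast path for SHOW TAG VALUES, an error-returning Emit, a BufferedPointsWriter flushed at the end of INTO statements, and read-only warnings.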