Example #1
// Open opens and initializes the mapper.
func (m *RawMapper) Open() error {
	// Ignore if the node has the shard but hasn't written to it yet.
	if m.shard == nil {
		return nil
	}

	// Rewrite statement.
	stmt, err := m.shard.index.RewriteSelectStatement(m.stmt)
	if err != nil {
		return err
	}
	m.stmt = stmt

	// Set all time-related parameters on the mapper.
	m.qmin, m.qmax = influxql.TimeRangeAsEpochNano(m.stmt.Condition)

	// Get a read-only transaction.
	tx, err := m.shard.engine.Begin(false)
	if err != nil {
		return err
	}
	m.tx = tx

	// Collect measurements.
	mms := Measurements(m.shard.index.MeasurementsByName(m.stmt.SourceNames()))
	m.selectFields = mms.SelectFields(m.stmt)
	m.selectTags = mms.SelectTags(m.stmt)
	m.whereFields = mms.WhereFields(m.stmt)

	// Open cursors for each measurement.
	for _, mm := range mms {
		if err := m.openMeasurement(mm); err != nil {
			return err
		}
	}

	// Remove cursors if there are no SELECT fields.
	if len(m.selectFields) == 0 {
		m.cursors = nil
	}

	return nil
}
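
A caller would typically pair Open with the mapper's read loop and a deferred close. The sketch below is illustrative only: NextChunk and Close are assumed to exist on the mapper with roughly these shapes; they are not part of the excerpt above.

// drainMapper is a hypothetical driver showing where Open fits in a
// mapper's lifecycle. NextChunk and Close are assumptions about the
// surrounding package, not definitions taken from this example.
func drainMapper(m *RawMapper) error {
	if err := m.Open(); err != nil {
		return err
	}
	defer m.Close() // assumed to release m.tx and any open cursors

	for {
		chunk, err := m.NextChunk() // assumed chunked-read API
		if err != nil {
			return err
		}
		if chunk == nil {
			return nil // mapper is fully drained
		}
		// ... process chunk ...
	}
}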
Example #2
// Open opens and initializes the mapper.
func (m *AggregateMapper) Open() error {
	// Ignore if the node has the shard but hasn't written to it yet.
	if m.shard == nil {
		return nil
	}

	// Rewrite statement.
	stmt, err := m.shard.index.RewriteSelectStatement(m.stmt)
	if err != nil {
		return err
	}
	m.stmt = stmt

	// Set all time-related parameters on the mapper.
	m.qmin, m.qmax = influxql.TimeRangeAsEpochNano(m.stmt.Condition)

	if err := m.initializeMapFunctions(); err != nil {
		return err
	}

	// For GROUP BY time queries, limit the number of data points returned
	// according to the LIMIT and OFFSET clauses.
	d, err := m.stmt.GroupByInterval()
	if err != nil {
		return err
	}

	m.intervalSize = d.Nanoseconds()
	if m.qmin == 0 || m.intervalSize == 0 {
		m.intervalN = 1
		m.intervalSize = m.qmax - m.qmin
	} else {
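		// Worked example (illustrative numbers): with qmin = 130ns, qmax = 470ns
		// and a 100ns interval, intervalTop = 470/100*100 + 100 = 500 and
		// intervalBottom = 130/100*100 = 100, giving (500-100)/100 = 4 intervals.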
		intervalTop := m.qmax/m.intervalSize*m.intervalSize + m.intervalSize
		intervalBottom := m.qmin / m.intervalSize * m.intervalSize
		m.intervalN = int((intervalTop - intervalBottom) / m.intervalSize)
	}

	if m.stmt.Limit > 0 || m.stmt.Offset > 0 {
		// Ensure that the offset isn't higher than the number of points we'd get.
		if m.stmt.Offset > m.intervalN {
			return nil
		}

		// Take the lesser of the pre-computed number of GROUP BY buckets in
		// the result and the limit passed in by the user.
		if m.stmt.Limit < m.intervalN {
			m.intervalN = m.stmt.Limit
		}
	}

	// If we are exceeding our MaxGroupByPoints, error out.
	if m.intervalN > MaxGroupByPoints {
		return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
	}

	// Ensure that the start time for the results is on the start of the window.
	m.qminWindow = m.qmin
	if m.intervalSize > 0 && m.intervalN > 1 {
		m.qminWindow = m.qminWindow / m.intervalSize * m.intervalSize
	}

	// Get a read-only transaction.
	tx, err := m.shard.engine.Begin(false)
	if err != nil {
		return err
	}
	m.tx = tx

	// Collect measurements.
	mms := Measurements(m.shard.index.MeasurementsByName(m.stmt.SourceNames()))
	m.selectFields = mms.SelectFields(m.stmt)
	m.selectTags = mms.SelectTags(m.stmt)
	m.whereFields = mms.WhereFields(m.stmt)

	// Open cursors for each measurement.
	for _, mm := range mms {
		if err := m.openMeasurement(mm); err != nil {
			return err
		}
	}

	return nil
}
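
The interval arithmetic above is integer truncation to bucket boundaries. Extracted as a standalone sketch (a hypothetical helper, not part of the package), the bucket count reduces to:

// bucketCount mirrors the arithmetic in Open above: it returns the number
// of GROUP BY time buckets covering [qmin, qmax] for a given interval
// size, all in epoch nanoseconds. Illustrative helper only.
func bucketCount(qmin, qmax, interval int64) int {
	if qmin == 0 || interval == 0 {
		return 1 // a single bucket spanning the whole time range
	}
	top := qmax/interval*interval + interval // end of the bucket holding qmax
	bottom := qmin / interval * interval     // start of the bucket holding qmin
	return int((top - bottom) / interval)
}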
Example #3
// Open opens the local mapper.
func (lm *SelectMapper) Open() error {
	if lm.remote != nil {
		return lm.remote.Open()
	}

	// This can happen when a shard has been assigned to this node but we have
	// not written to it, so it may not exist yet.
	if lm.shard == nil {
		return nil
	}

	var err error

	// Get a read-only transaction.
	tx, err := lm.shard.engine.Begin(false)
	if err != nil {
		return err
	}
	lm.tx = tx

	if s, ok := lm.stmt.(*influxql.SelectStatement); ok {
		stmt, err := lm.rewriteSelectStatement(s)
		if err != nil {
			return err
		}
		lm.selectStmt = stmt
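		// Raw mode streams points directly: plain (non-aggregate) queries
		// without DISTINCT, plus simple derivatives of raw values. Everything
		// else goes through the map functions initialized below.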
		lm.rawMode = (s.IsRawQuery && !s.HasDistinct()) || s.IsSimpleDerivative()
	} else {
		return lm.openMeta()
	}

	// Set all time-related parameters on the mapper.
	lm.queryTMin, lm.queryTMax = influxql.TimeRangeAsEpochNano(lm.selectStmt.Condition)

	if !lm.rawMode {
		if err := lm.initializeMapFunctions(); err != nil {
			return err
		}

		// For GROUP BY time queries, limit the number of data points returned
		// according to the LIMIT and OFFSET clauses.
		d, err := lm.selectStmt.GroupByInterval()
		if err != nil {
			return err
		}
		lm.intervalSize = d.Nanoseconds()
		if lm.queryTMin == 0 || lm.intervalSize == 0 {
			lm.numIntervals = 1
			lm.intervalSize = lm.queryTMax - lm.queryTMin
		} else {
			intervalTop := lm.queryTMax/lm.intervalSize*lm.intervalSize + lm.intervalSize
			intervalBottom := lm.queryTMin / lm.intervalSize * lm.intervalSize
			lm.numIntervals = int((intervalTop - intervalBottom) / lm.intervalSize)
		}

		if lm.selectStmt.Limit > 0 || lm.selectStmt.Offset > 0 {
			// Ensure that the offset isn't higher than the number of points we'd get.
			if lm.selectStmt.Offset > lm.numIntervals {
				return nil
			}

			// Take the lesser of the pre-computed number of GROUP BY buckets in
			// the result and the limit passed in by the user.
			if lm.selectStmt.Limit < lm.numIntervals {
				lm.numIntervals = lm.selectStmt.Limit
			}
		}

		// If we are exceeding our MaxGroupByPoints, error out.
		if lm.numIntervals > MaxGroupByPoints {
			return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
		}

		// Ensure that the start time for the results is on the start of the window.
		lm.queryTMinWindow = lm.queryTMin
		if lm.intervalSize > 0 && lm.numIntervals > 1 {
			lm.queryTMinWindow = lm.queryTMinWindow / lm.intervalSize * lm.intervalSize
		}
	}

	selectFields := newStringSet()
	selectTags := newStringSet()
	whereFields := newStringSet()

	// Create the TagSet cursors for the Mapper.
	for _, src := range lm.selectStmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return fmt.Errorf("invalid source type: %#v", src)
		}

		m := lm.shard.index.Measurement(mm.Name)
		if m == nil {
			// This shard has never received data for this measurement, so no
			// Mapper is required.
			return nil
		}

		// Validate that any GROUP BY is not on a field for this measurement.
		if err := m.ValidateGroupBy(lm.selectStmt); err != nil {
			return err
		}

		// Create tagset cursors and determine various field types within SELECT statement.
		tsf, err := createTagSetsAndFields(m, lm.selectStmt)
		if err != nil {
			return err
		}
		tagSets := tsf.tagSets
		selectFields.add(tsf.selectFields...)
		selectTags.add(tsf.selectTags...)
		whereFields.add(tsf.whereFields...)

		// If we only have tags in our SELECT clause, return an error.
		if len(selectFields) == 0 && len(selectTags) > 0 {
			return fmt.Errorf("statement must have at least one field in select clause")
		}

		// SLIMIT and SOFFSET the unique series
		if lm.selectStmt.SLimit > 0 || lm.selectStmt.SOffset > 0 {
			if lm.selectStmt.SOffset > len(tagSets) {
				tagSets = nil
			} else {
				if lm.selectStmt.SOffset+lm.selectStmt.SLimit > len(tagSets) {
					lm.selectStmt.SLimit = len(tagSets) - lm.selectStmt.SOffset
				}

				tagSets = tagSets[lm.selectStmt.SOffset : lm.selectStmt.SOffset+lm.selectStmt.SLimit]
			}
		}

		// For aggregate functions, we iterate the cursors in forward order but
		// return the time bucket results in reverse order. This simplifies the
		// aggregate code, which then does not need to handle both forward and
		// reverse semantics. For raw queries, we do need to iterate in reverse
		// order when using ORDER BY time DESC.
		direction := Forward
		if lm.rawMode {
			direction = lm.timeDirection()
		}
		// Create all cursors for reading the data from this shard.
		for _, t := range tagSets {
			cursors := []*seriesCursor{}

			for i, key := range t.SeriesKeys {
				c := lm.tx.Cursor(key, direction)
				if c == nil {
					// No data exists for this key.
					continue
				}
				seriesTags := lm.shard.index.TagsForSeries(key)
				cm := newSeriesCursor(c, t.Filters[i], seriesTags)
				cursors = append(cursors, cm)
			}

			tsc := newTagSetCursor(m.Name, t.Tags, cursors, lm.shard.FieldCodec(m.Name))
			if lm.rawMode {
				tsc.pointHeap = newPointHeap()
				// Prime the buffers.
				for i := 0; i < len(tsc.cursors); i++ {
					var k int64
					var v []byte
					if direction.Forward() {
						k, v = tsc.cursors[i].SeekTo(lm.queryTMin)
					} else {
						k, v = tsc.cursors[i].SeekTo(lm.queryTMax)
					}

					if k == -1 {
						k, v = tsc.cursors[i].Next()
					}

					if k == -1 {
						continue
					}
					p := &pointHeapItem{
						timestamp: k,
						value:     v,
						cursor:    tsc.cursors[i],
					}
					heap.Push(tsc.pointHeap, p)
				}
			}
			lm.cursors = append(lm.cursors, tsc)
		}
		sort.Sort(tagSetCursors(lm.cursors))
	}

	lm.selectFields = selectFields.list()
	lm.selectTags = selectTags.list()
	lm.whereFields = whereFields.list()

	// If the query does not aggregate, then at least 1 SELECT field should be present.
	if lm.rawMode && len(lm.selectFields) == 0 {
		// None of the SELECT fields exist in this data. Wipe out all tagset cursors.
		lm.cursors = nil
	}

	return nil
}
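
The pointHeap primed in the raw-mode branch is a timestamp-ordered min-heap built on container/heap, so the earliest buffered point across all series cursors is always at the root. A minimal self-contained sketch of that structure (field names follow the excerpt; the heap implementation itself is an assumption about the package):

package main

import (
	"container/heap"
	"fmt"
)

// pointHeapItem mirrors the struct used in Open above; the cursor field
// is omitted here to keep the sketch self-contained.
type pointHeapItem struct {
	timestamp int64
	value     []byte
}

// pointHeap is a min-heap keyed on timestamp.
type pointHeap []*pointHeapItem

func (h pointHeap) Len() int           { return len(h) }
func (h pointHeap) Less(i, j int) bool { return h[i].timestamp < h[j].timestamp }
func (h pointHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *pointHeap) Push(x interface{}) { *h = append(*h, x.(*pointHeapItem)) }

func (h *pointHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

func main() {
	ph := &pointHeap{}
	heap.Init(ph)
	heap.Push(ph, &pointHeapItem{timestamp: 30})
	heap.Push(ph, &pointHeapItem{timestamp: 10})
	heap.Push(ph, &pointHeapItem{timestamp: 20})
	for ph.Len() > 0 {
		fmt.Println(heap.Pop(ph).(*pointHeapItem).timestamp) // 10, 20, 30
	}
}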
Example #4
// Open opens the raw mapper.
func (rm *RawMapper) Open() error {
	// Get a read-only transaction.
	tx, err := rm.shard.DB().Begin(false)
	if err != nil {
		return err
	}
	rm.tx = tx

	// Set all time-related parameters on the mapper.
	rm.queryTMin, rm.queryTMax = influxql.TimeRangeAsEpochNano(rm.stmt.Condition)

	// Create the TagSet cursors for the Mapper.
	for _, src := range rm.stmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return fmt.Errorf("invalid source type: %#v", src)
		}

		m := rm.shard.index.Measurement(mm.Name)
		if m == nil {
			// This shard has never received data for this measurement, so no
			// Mapper is required.
			return nil
		}

		// Create tagset cursors and determine various field types within SELECT statement.
		tsf, err := createTagSetsAndFields(m, rm.stmt)
		if err != nil {
			return err
		}
		tagSets := tsf.tagSets
		rm.selectFields = tsf.selectFields
		rm.selectTags = tsf.selectTags
		rm.whereFields = tsf.whereFields

		if len(rm.selectFields) == 0 {
			return fmt.Errorf("select statement must include at least one field")
		}

		// SLIMIT and SOFFSET the unique series
		if rm.stmt.SLimit > 0 || rm.stmt.SOffset > 0 {
			if rm.stmt.SOffset > len(tagSets) {
				tagSets = nil
			} else {
				if rm.stmt.SOffset+rm.stmt.SLimit > len(tagSets) {
					rm.stmt.SLimit = len(tagSets) - rm.stmt.SOffset
				}

				tagSets = tagSets[rm.stmt.SOffset : rm.stmt.SOffset+rm.stmt.SLimit]
			}
		}

		// Create all cursors for reading the data from this shard.
		for _, t := range tagSets {
			cursors := []*seriesCursor{}

			for i, key := range t.SeriesKeys {
				c := createCursorForSeries(rm.tx, rm.shard, key)
				if c == nil {
					// No data exists for this key.
					continue
				}
				cm := newSeriesCursor(c, t.Filters[i])
				cursors = append(cursors, cm)
			}

			tsc := newTagSetCursor(m.Name, t.Tags, cursors, rm.shard.FieldCodec(m.Name))
			// Prime the buffers.
			for i := 0; i < len(tsc.cursors); i++ {
				k, v := tsc.cursors[i].SeekTo(rm.queryTMin)
				tsc.keyBuffer[i] = k
				tsc.valueBuffer[i] = v
			}
			rm.cursors = append(rm.cursors, tsc)
		}
		sort.Sort(tagSetCursors(rm.cursors))
	}

	return nil
}
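
The SLIMIT/SOFFSET handling that recurs in these examples is plain slice windowing with a clamp when the limit runs past the end. As a standalone sketch (a hypothetical helper over string keys; the real code windows tagset structs in place):

// applySeriesLimit windows tagSets by SOFFSET/SLIMIT, clamping the limit
// when it would run past the end, as the Open methods above do in place.
// Illustrative helper only, not part of the package.
func applySeriesLimit(tagSets []string, slimit, soffset int) []string {
	if slimit <= 0 && soffset <= 0 {
		return tagSets // no series limit requested
	}
	if soffset > len(tagSets) {
		return nil // offset past the end: nothing to return
	}
	if soffset+slimit > len(tagSets) {
		slimit = len(tagSets) - soffset
	}
	return tagSets[soffset : soffset+slimit]
}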
Example #5
// Open opens the aggregate mapper.
func (am *AggMapper) Open() error {
	var err error

	// Get a read-only transaction.
	tx, err := am.shard.DB().Begin(false)
	if err != nil {
		return err
	}
	am.tx = tx

	// Set up each mapping function for this statement.
	aggregates := am.stmt.FunctionCalls()
	am.mapFuncs = make([]influxql.MapFunc, len(aggregates))
	am.fieldNames = make([]string, len(am.mapFuncs))
	for i, c := range aggregates {
		am.mapFuncs[i], err = influxql.InitializeMapFunc(c)
		if err != nil {
			return err
		}

		// Check for nested calls like `derivative(mean(value), 1d)`: if the
		// first argument is itself a call, the field name comes from that
		// inner call.
		nested := c
		if fn, ok := c.Args[0].(*influxql.Call); ok {
			nested = fn
		}
		switch lit := nested.Args[0].(type) {
		case *influxql.VarRef:
			am.fieldNames[i] = lit.Val
		case *influxql.Distinct:
			if c.Name != "count" {
				return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
			}
			am.fieldNames[i] = lit.Val
		default:
			return fmt.Errorf("aggregate call didn't contain a field %s", c.String())
		}
	}

	// Set all time-related parameters on the mapper.
	am.queryTMin, am.queryTMax = influxql.TimeRangeAsEpochNano(am.stmt.Condition)

	// For GROUP BY time queries, limit the number of data points returned
	// according to the LIMIT and OFFSET clauses.
	d, err := am.stmt.GroupByInterval()
	if err != nil {
		return err
	}
	am.intervalSize = d.Nanoseconds()
	if am.queryTMin == 0 || am.intervalSize == 0 {
		am.numIntervals = 1
		am.intervalSize = am.queryTMax - am.queryTMin
	} else {
		intervalTop := am.queryTMax/am.intervalSize*am.intervalSize + am.intervalSize
		intervalBottom := am.queryTMin / am.intervalSize * am.intervalSize
		am.numIntervals = int((intervalTop - intervalBottom) / am.intervalSize)
	}

	if am.stmt.Limit > 0 || am.stmt.Offset > 0 {
		// Ensure that the offset isn't higher than the number of points we'd get.
		if am.stmt.Offset > am.numIntervals {
			return nil
		}

		// Take the lesser of the pre-computed number of GROUP BY buckets in
		// the result and the limit passed in by the user.
		if am.stmt.Limit < am.numIntervals {
			am.numIntervals = am.stmt.Limit
		}
	}

	// If we are exceeding our MaxGroupByPoints, error out.
	if am.numIntervals > MaxGroupByPoints {
		return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
	}

	// Ensure that the start time for the results is on the start of the window.
	am.queryTMinWindow = am.queryTMin
	if am.intervalSize > 0 && am.numIntervals > 1 {
		am.queryTMinWindow = am.queryTMinWindow / am.intervalSize * am.intervalSize
	}

	// Create the TagSet cursors for the Mapper.
	for _, src := range am.stmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return fmt.Errorf("invalid source type: %#v", src)
		}

		m := am.shard.index.Measurement(mm.Name)
		if m == nil {
			// This shard has never received data for this measurement, so no
			// Mapper is required.
			return nil
		}

		// Create tagset cursors and determine various field types within SELECT statement.
		tsf, err := createTagSetsAndFields(m, am.stmt)
		if err != nil {
			return err
		}
		tagSets := tsf.tagSets
		am.selectFields = tsf.selectFields
		am.selectTags = tsf.selectTags
		am.whereFields = tsf.whereFields

		// Validate that any GROUP BY is not on a field.
		if err := m.ValidateGroupBy(am.stmt); err != nil {
			return err
		}

		// SLIMIT and SOFFSET the unique series
		if am.stmt.SLimit > 0 || am.stmt.SOffset > 0 {
			if am.stmt.SOffset > len(tagSets) {
				tagSets = nil
			} else {
				if am.stmt.SOffset+am.stmt.SLimit > len(tagSets) {
					am.stmt.SLimit = len(tagSets) - am.stmt.SOffset
				}

				tagSets = tagSets[am.stmt.SOffset : am.stmt.SOffset+am.stmt.SLimit]
			}
		}

		// Create all cursors for reading the data from this shard.
		for _, t := range tagSets {
			cursors := []*seriesCursor{}

			for i, key := range t.SeriesKeys {
				c := createCursorForSeries(am.tx, am.shard, key)
				if c == nil {
					// No data exists for this key.
					continue
				}
				cm := newSeriesCursor(c, t.Filters[i])
				cursors = append(cursors, cm)
			}
			tsc := newTagSetCursor(m.Name, t.Tags, cursors, am.shard.FieldCodec(m.Name))
			am.cursors = append(am.cursors, tsc)
		}
		sort.Sort(tagSetCursors(am.cursors))
	}

	return nil
}
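
The nested-call unwrap in this example means that for derivative(mean(value), 1d) the field name is read from the inner mean call. A compact illustration with simplified stand-ins for the influxql AST types (illustrative only; the real types carry more fields and cases, e.g. Distinct):

// call and varRef are simplified stand-ins for *influxql.Call and
// *influxql.VarRef, defined here only to keep the sketch self-contained.
type varRef struct{ Val string }
type call struct {
	Name string
	Args []interface{}
}

// fieldNameOf mirrors the unwrap in Open: if the first argument is itself
// a call, the field name comes from that inner call's first argument.
// Like the original, it assumes Args is non-empty.
func fieldNameOf(c *call) (string, bool) {
	nested := c
	if fn, ok := c.Args[0].(*call); ok {
		nested = fn
	}
	ref, ok := nested.Args[0].(*varRef)
	if !ok {
		return "", false // no field in the aggregate call
	}
	return ref.Val, true
}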
Example #6
// Open opens the local mapper.
func (lm *LocalMapper) Open() error {
	var err error

	// Get a read-only transaction.
	tx, err := lm.shard.engine.Begin(false)
	if err != nil {
		return err
	}
	lm.tx = tx

	if lm.selectStmt == nil {
		return lm.openMeta()
	}

	// Set all time-related parameters on the mapper.
	lm.queryTMin, lm.queryTMax = influxql.TimeRangeAsEpochNano(lm.selectStmt.Condition)

	if !lm.rawMode {
		if err := lm.initializeMapFunctions(); err != nil {
			return err
		}

		// For GROUP BY time queries, limit the number of data points returned
		// according to the LIMIT and OFFSET clauses.
		d, err := lm.selectStmt.GroupByInterval()
		if err != nil {
			return err
		}
		lm.intervalSize = d.Nanoseconds()
		if lm.queryTMin == 0 || lm.intervalSize == 0 {
			lm.numIntervals = 1
			lm.intervalSize = lm.queryTMax - lm.queryTMin
		} else {
			intervalTop := lm.queryTMax/lm.intervalSize*lm.intervalSize + lm.intervalSize
			intervalBottom := lm.queryTMin / lm.intervalSize * lm.intervalSize
			lm.numIntervals = int((intervalTop - intervalBottom) / lm.intervalSize)
		}

		if lm.selectStmt.Limit > 0 || lm.selectStmt.Offset > 0 {
			// Ensure that the offset isn't higher than the number of points we'd get.
			if lm.selectStmt.Offset > lm.numIntervals {
				return nil
			}

			// Take the lesser of the pre-computed number of GROUP BY buckets in
			// the result and the limit passed in by the user.
			if lm.selectStmt.Limit < lm.numIntervals {
				lm.numIntervals = lm.selectStmt.Limit
			}
		}

		// If we are exceeding our MaxGroupByPoints, error out.
		if lm.numIntervals > MaxGroupByPoints {
			return errors.New("too many points in the group by interval. maybe you forgot to specify a where time clause?")
		}

		// Ensure that the start time for the results is on the start of the window.
		lm.queryTMinWindow = lm.queryTMin
		if lm.intervalSize > 0 {
			lm.queryTMinWindow = lm.queryTMinWindow / lm.intervalSize * lm.intervalSize
		}
	}

	selectFields := newStringSet()
	selectTags := newStringSet()
	whereFields := newStringSet()

	// Create the TagSet cursors for the Mapper.
	for _, src := range lm.selectStmt.Sources {
		mm, ok := src.(*influxql.Measurement)
		if !ok {
			return fmt.Errorf("invalid source type: %#v", src)
		}

		m := lm.shard.index.Measurement(mm.Name)
		if m == nil {
			// This shard has never received data for this measurement, so no
			// Mapper is required.
			return nil
		}

		// Validate that any GROUP BY is not on a field for this measurement.
		if err := m.ValidateGroupBy(lm.selectStmt); err != nil {
			return err
		}

		// Create tagset cursors and determine various field types within SELECT statement.
		tsf, err := createTagSetsAndFields(m, lm.selectStmt)
		if err != nil {
			return err
		}
		tagSets := tsf.tagSets
		selectFields.add(tsf.selectFields...)
		selectTags.add(tsf.selectTags...)
		whereFields.add(tsf.whereFields...)

		// SLIMIT and SOFFSET the unique series
		if lm.selectStmt.SLimit > 0 || lm.selectStmt.SOffset > 0 {
			if lm.selectStmt.SOffset > len(tagSets) {
				tagSets = nil
			} else {
				if lm.selectStmt.SOffset+lm.selectStmt.SLimit > len(tagSets) {
					lm.selectStmt.SLimit = len(tagSets) - lm.selectStmt.SOffset
				}

				tagSets = tagSets[lm.selectStmt.SOffset : lm.selectStmt.SOffset+lm.selectStmt.SLimit]
			}
		}

		// Create all cursors for reading the data from this shard.
		for _, t := range tagSets {
			cursors := []*seriesCursor{}

			for i, key := range t.SeriesKeys {
				c := lm.tx.Cursor(key)
				if c == nil {
					// No data exists for this key.
					continue
				}
				cm := newSeriesCursor(c, t.Filters[i])
				cursors = append(cursors, cm)
			}

			tsc := newTagSetCursor(m.Name, t.Tags, cursors, lm.shard.FieldCodec(m.Name))
			// Prime the buffers.
			for i := 0; i < len(tsc.cursors); i++ {
				k, v := tsc.cursors[i].SeekTo(lm.queryTMin)
				tsc.keyBuffer[i] = k
				tsc.valueBuffer[i] = v
			}
			lm.cursors = append(lm.cursors, tsc)
		}
		sort.Sort(tagSetCursors(lm.cursors))
	}

	lm.selectFields = selectFields.list()
	lm.selectTags = selectTags.list()
	lm.whereFields = whereFields.list()

	// If the query does not aggregate, then at least 1 SELECT field should be present.
	if lm.rawMode && len(lm.selectFields) == 0 {
		// None of the SELECT fields exist in this data. Wipe out all tagset cursors.
		lm.cursors = nil
	}

	return nil
}
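
Unlike the heap-based priming in Example #3, this version primes parallel keyBuffer/valueBuffer slots, one per cursor, with -1 marking an exhausted cursor. A minimal sketch of the k-way merge such buffers enable (illustrative; the real tagSetCursor logic also applies filters and decodes fields):

// nextAcrossCursors picks the cursor whose buffered key (timestamp) is
// smallest, emits its buffered pair, and refills that slot -- the k-way
// merge the primed buffers enable. Sketch only; advance stands in for
// the cursor's Next method and returns -1 when a cursor is drained.
func nextAcrossCursors(keyBuffer []int64, valueBuffer [][]byte,
	advance func(i int) (int64, []byte)) (int64, []byte, bool) {
	min := -1
	for i, k := range keyBuffer {
		if k == -1 {
			continue // cursor i is exhausted
		}
		if min == -1 || k < keyBuffer[min] {
			min = i
		}
	}
	if min == -1 {
		return 0, nil, false // all cursors drained
	}
	k, v := keyBuffer[min], valueBuffer[min]
	keyBuffer[min], valueBuffer[min] = advance(min)
	return k, v, true
}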