Code example #1
File: leveldb_datastore.go Project: jondot/influxdb
func (self *LevelDbDatastore) ExecuteQuery(user common.User, database string, query *parser.Query, yield func(*protocol.Series) error) error {
	seriesAndColumns := query.GetReferencedColumns()
	hasAccess := true
	for series, columns := range seriesAndColumns {
		if regex, ok := series.GetCompiledRegex(); ok {
			seriesNames := self.getSeriesForDbAndRegex(database, regex)
			for _, name := range seriesNames {
				if !user.HasReadAccess(name) {
					hasAccess = false
					continue
				}
				err := self.executeQueryForSeries(database, name, columns, query, yield)
				if err != nil {
					return err
				}
			}
		} else {
			if !user.HasReadAccess(series.Name) {
				hasAccess = false
				continue
			}
			err := self.executeQueryForSeries(database, series.Name, columns, query, yield)
			if err != nil {
				return err
			}
		}
	}
	if !hasAccess {
		return fmt.Errorf("You don't have permission to access one or more time series")
	}
	return nil
}
Code example #2
// Return the number of dropped ticks from filtering. If the series
// has more than one alias, return the min of all dropped ticks.
func (self *LevelDbDatastore) sendBatch(query *parser.Query, series *protocol.Series, yield func(series *protocol.Series) error) (int, error) {
	dropped := int(math.MaxInt32)

	for _, alias := range query.GetTableAliases(*series.Name) {
		_alias := alias
		newSeries := &protocol.Series{Name: &_alias, Points: series.Points, Fields: series.Fields}

		lengthBeforeFiltering := len(newSeries.Points)
		var filteredResult *protocol.Series
		if query.GetFromClause().Type == parser.FromClauseInnerJoin {
			filteredResult = newSeries
		} else {
			filteredResult, _ = Filter(query, newSeries)
		}
		_dropped := lengthBeforeFiltering - len(filteredResult.Points)
		if _dropped < dropped {
			dropped = _dropped
		}
		if err := yield(filteredResult); err != nil {
			return 0, err
		}
	}

	return dropped, nil
}
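The _alias := alias copy above is not redundant: before Go 1.22, a range loop reused a single loop variable, so taking &alias directly would leave every Series pointing at whatever the loop wrote last. A minimal standalone sketch of the difference (names here are illustrative):

package main

import "fmt"

func main() {
	aliases := []string{"cpu", "cpu_copy"}
	ptrs := []*string{}
	for _, alias := range aliases {
		aliasCopy := alias // same pattern as _alias := alias above
		ptrs = append(ptrs, &aliasCopy)
	}
	for _, p := range ptrs {
		fmt.Println(*p) // prints "cpu", then "cpu_copy"
	}
	// Without the copy, pre-Go 1.22, both pointers would print "cpu_copy".
}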
Code example #3
File: engine.go Project: nvdnkpr/influxdb
func isAggregateQuery(query *parser.Query) bool {
	for _, column := range query.GetColumnNames() {
		if column.IsFunctionCall() {
			return true
		}
	}
	return false
}
Code example #4
File: merge.go Project: pombredanne/influxdb
func getJoinYield(query *parser.Query, yield func(*protocol.Series) error) func(*protocol.Series) error {
	var lastPoint1 *protocol.Point
	var lastFields1 []string
	var lastPoint2 *protocol.Point
	var lastFields2 []string

	table1 := query.GetFromClause().Names[0].GetAlias()
	table2 := query.GetFromClause().Names[1].GetAlias()
	name := table1 + "_join_" + table2

	return mergeYield(table1, table2, false, query.Ascending, func(s *protocol.Series) error {
		if *s.Name == table1 {
			lastPoint1 = s.Points[len(s.Points)-1]
			if lastFields1 == nil {
				for _, f := range s.Fields {
					lastFields1 = append(lastFields1, table1+"."+f)
				}
			}
		}

		if *s.Name == table2 {
			lastPoint2 = s.Points[len(s.Points)-1]
			if lastFields2 == nil {
				for _, f := range s.Fields {
					lastFields2 = append(lastFields2, table2+"."+f)
				}
			}
		}

		if lastPoint1 == nil || lastPoint2 == nil {
			return nil
		}

		newSeries := &protocol.Series{
			Name:   &name,
			Fields: append(lastFields1, lastFields2...),
			Points: []*protocol.Point{
				&protocol.Point{
					Values:         append(lastPoint1.Values, lastPoint2.Values...),
					Timestamp:      lastPoint2.Timestamp,
					SequenceNumber: lastPoint2.SequenceNumber,
				},
			},
		}

		lastPoint1 = nil
		lastPoint2 = nil

		filteredSeries, _ := datastore.Filter(query, newSeries)
		if len(filteredSeries.Points) > 0 {
			return yield(newSeries)
		}
		return nil
	})
}
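The join above buffers the latest point from each side and emits a combined point only once both sides have produced one, then resets the pair. A stripped-down sketch of that pairing pattern, with illustrative stand-in types in place of protocol.Series:

package main

import "fmt"

// pairJoin sketches the buffering logic of getJoinYield: hold the latest
// value from each input, emit a merged row only when both are set, then
// reset. Types and names are illustrative stand-ins, not project code.
func pairJoin(left, right []int, emit func(l, r int)) {
	var lastLeft, lastRight *int
	i, j := 0, 0
	for i < len(left) || j < len(right) {
		if i < len(left) {
			v := left[i]
			lastLeft = &v
			i++
		}
		if j < len(right) {
			v := right[j]
			lastRight = &v
			j++
		}
		if lastLeft != nil && lastRight != nil {
			emit(*lastLeft, *lastRight)
			lastLeft, lastRight = nil, nil // reset, as getJoinYield does
		}
	}
}

func main() {
	pairJoin([]int{1, 2}, []int{10, 20}, func(l, r int) {
		fmt.Println(l, r) // 1 10, then 2 20
	})
}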
Code example #5
File: engine.go Project: pombredanne/influxdb
// distribute query and possibly do the merge/join before yielding the points
func (self *QueryEngine) distributeQuery(user common.User, database string, query *parser.Query, yield func(*protocol.Series) error) (err error) {
	// see if this is a merge query
	fromClause := query.GetFromClause()
	if fromClause.Type == parser.FromClauseMerge {
		yield = getMergeYield(fromClause.Names[0].Name.Name, fromClause.Names[1].Name.Name, query.Ascending, yield)
	}

	if fromClause.Type == parser.FromClauseInnerJoin {
		yield = getJoinYield(query, yield)
	}

	return self.coordinator.DistributeQuery(user, database, query, yield)
}
Code example #6
File: aggregator.go Project: rudrapranay/influxdb
func NewTimestampAggregator(query *parser.Query, _ *parser.Value) (Aggregator, error) {
	duration, err := query.GetGroupByClause().GetGroupByTime()
	if err != nil {
		return nil, err
	}

	var durationPtr *int64

	if duration != nil {
		newDuration := int64(*duration / time.Microsecond)
		durationPtr = &newDuration
	}

	return &TimestampAggregator{
		timestamps: make(map[string]map[interface{}]int64),
		duration:   durationPtr,
	}, nil
}
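The conversion above works because time.Duration is an int64 count of nanoseconds, so integer division by time.Microsecond yields whole microseconds. A standalone illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 5 * time.Minute
	// time.Duration is an int64 nanosecond count, so this integer
	// division yields the duration in whole microseconds.
	micros := int64(d / time.Microsecond)
	fmt.Println(micros) // 300000000
}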
Code example #7
File: filtering.go Project: nairboon/influxdb
func Filter(query *parser.Query, series *protocol.Series) (*protocol.Series, error) {
	if query.GetWhereCondition() == nil {
		return series, nil
	}

	columns := map[string]bool{}
	getColumns(query.GetColumnNames(), columns)

	points := series.Points
	series.Points = nil
	for _, point := range points {
		ok, err := matches(query.GetWhereCondition(), series.Fields, point)

		if err != nil {
			return nil, err
		}

		if ok {
			filterColumns(columns, series.Fields, point)
			series.Points = append(series.Points, point)
		}
	}

	if !columns["*"] {
		newFields := []string{}
		for _, f := range series.Fields {
			if _, ok := columns[f]; !ok {
				continue
			}

			newFields = append(newFields, f)
		}
		series.Fields = newFields
	}
	return series, nil
}
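Note that Filter mutates the series it receives and returns the same pointer: Points is rebuilt in place and Fields may be narrowed. That is why code example #4 can yield newSeries after calling datastore.Filter on it, and why callers must not assume the input is left intact. A minimal sketch of the same in-place pattern, with an illustrative stand-in for protocol.Series:

package main

import "fmt"

// series is an illustrative stand-in for protocol.Series.
type series struct {
	Fields []string
	Points []int
}

// filterInPlace mirrors Filter's shape: it rebuilds Points in place and
// returns the same *series it was given.
func filterInPlace(s *series, keep func(int) bool) *series {
	points := s.Points
	s.Points = nil
	for _, p := range points {
		if keep(p) {
			s.Points = append(s.Points, p)
		}
	}
	return s
}

func main() {
	s := &series{Fields: []string{"value"}, Points: []int{1, 2, 3, 4}}
	filtered := filterInPlace(s, func(p int) bool { return p%2 == 0 })
	fmt.Println(filtered == s)   // true: same series, mutated in place
	fmt.Println(filtered.Points) // [2 4]
}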
Code example #8
File: leveldb_datastore.go Project: jondot/influxdb
func (self *LevelDbDatastore) executeQueryForSeries(database, series string, columns []string, query *parser.Query, yield func(*protocol.Series) error) error {
	startTimeBytes, endTimeBytes := self.byteArraysForStartAndEndTimes(common.TimeToMicroseconds(query.GetStartTime()), common.TimeToMicroseconds(query.GetEndTime()))

	fields, err := self.getFieldsForSeries(database, series, columns)
	if err != nil {
		return err
	}
	fieldCount := len(fields)
	prefixes := make([][]byte, fieldCount, fieldCount)
	iterators := make([]*levigo.Iterator, fieldCount, fieldCount)
	fieldNames := make([]string, len(fields))

	// start the iterators to go through the series data
	for i, field := range fields {
		fieldNames[i] = field.Name
		prefixes[i] = field.Id
		iterators[i] = self.db.NewIterator(self.readOptions)
		if query.Ascending {
			iterators[i].Seek(append(field.Id, startTimeBytes...))
		} else {
			iterators[i].Seek(append(append(field.Id, endTimeBytes...), MAX_SEQUENCE...))
			if iterators[i].Valid() {
				iterators[i].Prev()
			}
		}
	}

	result := &protocol.Series{Name: &series, Fields: fieldNames, Points: make([]*protocol.Point, 0)}
	rawColumnValues := make([]*rawColumnValue, fieldCount, fieldCount)
	isValid := true

	limit := query.Limit
	if limit == 0 {
		limit = MAX_POINTS_TO_SCAN
	}

	resultByteCount := 0

	// TODO: clean up, this is super gnarly
	// optimize for the case where we're pulling back only a single column or aggregate
	for isValid {
		isValid = false
		latestTimeRaw := make([]byte, 8, 8)
		latestSequenceRaw := make([]byte, 8, 8)
		point := &protocol.Point{Values: make([]*protocol.FieldValue, fieldCount, fieldCount)}
		for i, it := range iterators {
			if rawColumnValues[i] == nil && it.Valid() {
				k := it.Key()
				if len(k) >= 16 {
					t := k[8:16]
					if bytes.Equal(k[:8], fields[i].Id) && bytes.Compare(t, startTimeBytes) > -1 && bytes.Compare(t, endTimeBytes) < 1 {
						v := it.Value()
						s := k[16:]
						rawColumnValues[i] = &rawColumnValue{time: t, sequence: s, value: v}
						timeCompare := bytes.Compare(t, latestTimeRaw)
						if timeCompare == 1 {
							latestTimeRaw = t
							latestSequenceRaw = s
						} else if timeCompare == 0 {
							if bytes.Compare(s, latestSequenceRaw) == 1 {
								latestSequenceRaw = s
							}
						}
					}
				}
			}
		}

		for i, iterator := range iterators {
			if rawColumnValues[i] != nil && bytes.Equal(rawColumnValues[i].time, latestTimeRaw) && bytes.Equal(rawColumnValues[i].sequence, latestSequenceRaw) {
				isValid = true
				if query.Ascending {
					iterator.Next()
				} else {
					iterator.Prev()
				}
				fv := &protocol.FieldValue{}
				err := proto.Unmarshal(rawColumnValues[i].value, fv)
				if err != nil {
					return err
				}
				resultByteCount += len(rawColumnValues[i].value)
				point.Values[i] = fv
				var t uint64
				binary.Read(bytes.NewBuffer(rawColumnValues[i].time), binary.BigEndian, &t)
				time := self.convertUintTimestampToInt64(&t)
				var sequence uint64
				binary.Read(bytes.NewBuffer(rawColumnValues[i].sequence), binary.BigEndian, &sequence)
				seq32 := uint32(sequence)
				point.SetTimestampInMicroseconds(time)
				point.SequenceNumber = &seq32
				rawColumnValues[i] = nil
			}
		}
		if isValid {
			limit -= 1
			result.Points = append(result.Points, point)

			// add byte count for the timestamp and the sequence
			resultByteCount += 16

			// check if we should send the batch along
			if resultByteCount > MAX_SERIES_SIZE {
				filteredResult, _ := Filter(query, result)
				if err := yield(filteredResult); err != nil {
					return err
				}
				resultByteCount = 0
				result = &protocol.Series{Name: &series, Fields: fieldNames, Points: make([]*protocol.Point, 0)}
			}
		}
		if limit < 1 {
			break
		}
	}
	filteredResult, _ := Filter(query, result)
	if err := yield(filteredResult); err != nil {
		return err
	}
	emptyResult := &protocol.Series{Name: &series, Fields: fieldNames, Points: nil}
	return yield(emptyResult)
}
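The slicing above (k[:8], k[8:16], k[16:]) implies a key layout of an 8-byte field id, an 8-byte big-endian timestamp, and a big-endian sequence number. Big-endian matters: it makes bytes.Compare on the raw timestamp bytes agree with numeric order, which is what the range checks against startTimeBytes and endTimeBytes rely on. A sketch of that layout, where buildKey is a hypothetical helper and only the layout is inferred from the slicing:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// buildKey sketches the assumed key layout: 8-byte field id, then
// big-endian timestamp and sequence number.
func buildKey(fieldId []byte, timeMicro, seq uint64) []byte {
	key := make([]byte, 24)
	copy(key[:8], fieldId)
	binary.BigEndian.PutUint64(key[8:16], timeMicro)
	binary.BigEndian.PutUint64(key[16:24], seq)
	return key
}

func main() {
	id := []byte{0, 0, 0, 0, 0, 0, 0, 1}
	k1 := buildKey(id, 100, 0)
	k2 := buildKey(id, 200, 0)
	// Big-endian encoding keeps byte order aligned with numeric order,
	// so lexicographic iteration visits points in time order.
	fmt.Println(bytes.Compare(k1[8:16], k2[8:16])) // -1
}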
Code example #9
func (self *LevelDbDatastore) executeQueryForSeries(database, series string, columns []string, query *parser.Query, yield func(*protocol.Series) error) error {
	startTimeBytes, endTimeBytes := self.byteArraysForStartAndEndTimes(common.TimeToMicroseconds(query.GetStartTime()), common.TimeToMicroseconds(query.GetEndTime()))

	fields, err := self.getFieldsForSeries(database, series, columns)
	if err != nil {
		return err
	}
	fieldCount := len(fields)
	fieldNames, iterators := self.getIterators(fields, startTimeBytes, endTimeBytes, query.Ascending)

	result := &protocol.Series{Name: &series, Fields: fieldNames, Points: make([]*protocol.Point, 0)}
	rawColumnValues := make([]*rawColumnValue, fieldCount, fieldCount)

	limit := query.Limit
	if limit == 0 {
		limit = MAX_POINTS_TO_SCAN
	}

	resultByteCount := 0

	// TODO: clean up, this is super gnarly
	// optimize for the case where we're pulling back only a single column or aggregate
	for {
		isValid := false

		point := &protocol.Point{Values: make([]*protocol.FieldValue, fieldCount, fieldCount)}
		for i, it := range iterators {
			if rawColumnValues[i] != nil || !it.Valid() {
				continue
			}

			key := it.Key()
			if len(key) < 16 {
				continue
			}

			if !isPointInRange(fields[i].Id, startTimeBytes, endTimeBytes, key) {
				continue
			}

			time := key[8:16]
			value := it.Value()
			sequenceNumber := key[16:]

			rawValue := &rawColumnValue{time: time, sequence: sequenceNumber, value: value}
			rawColumnValues[i] = rawValue
		}

		var pointTimeRaw []byte
		var pointSequenceRaw []byte
		// choose the highest (or lowest in case of ascending queries) timestamp
		// and sequence number. That will become the timestamp and sequence of
		// the next point.
		for _, value := range rawColumnValues {
			if value == nil {
				continue
			}

			pointTimeRaw, pointSequenceRaw = value.updatePointTimeAndSequence(pointTimeRaw,
				pointSequenceRaw, query.Ascending)
		}

		for i, iterator := range iterators {
			// if the value is nil, or doesn't match the point's timestamp and sequence number
			// then skip it
			if rawColumnValues[i] == nil ||
				!bytes.Equal(rawColumnValues[i].time, pointTimeRaw) ||
				!bytes.Equal(rawColumnValues[i].sequence, pointSequenceRaw) {

				continue
			}

			// if we emitted at least one column, then we should keep
			// trying to get more points
			isValid = true

			// advance the iterator to read a new value in the next iteration
			if query.Ascending {
				iterator.Next()
			} else {
				iterator.Prev()
			}
			fv := &protocol.FieldValue{}
			err := proto.Unmarshal(rawColumnValues[i].value, fv)
			if err != nil {
				return err
			}
			resultByteCount += len(rawColumnValues[i].value)
			point.Values[i] = fv
			var t uint64
			binary.Read(bytes.NewBuffer(rawColumnValues[i].time), binary.BigEndian, &t)
			time := self.convertUintTimestampToInt64(&t)
			var sequence uint64
			binary.Read(bytes.NewBuffer(rawColumnValues[i].sequence), binary.BigEndian, &sequence)
			seq32 := uint32(sequence)
			point.SetTimestampInMicroseconds(time)
			point.SequenceNumber = &seq32
			rawColumnValues[i] = nil
		}

		// stop the loop if we ran out of points
		if !isValid {
			break
		}

		limit -= 1
		result.Points = append(result.Points, point)

		// add byte count for the timestamp and the sequence
		resultByteCount += 16

		// check if we should send the batch along
		if resultByteCount > MAX_SERIES_SIZE || limit < 1 {
			dropped, err := self.sendBatch(query, result, yield)
			if err != nil {
				return err
			}
			limit += dropped
			resultByteCount = 0
			result = &protocol.Series{Name: &series, Fields: fieldNames, Points: make([]*protocol.Point, 0)}
		}
		if limit < 1 {
			break
		}
	}
	if _, err := self.sendBatch(query, result, yield); err != nil {
		return err
	}
	emptyResult := &protocol.Series{Name: &series, Fields: fieldNames, Points: nil}
	_, err = self.sendBatch(query, emptyResult, yield)
	return err
}
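The limit += dropped line ties back to sendBatch in code example #2: the scan loop decrements limit once per scanned point, but points the WHERE clause later drops should not count against LIMIT, so the dropped count is credited back. A toy illustration of the bookkeeping (the numbers are made up):

package main

import "fmt"

func main() {
	limit := 10
	scanned := 7   // points the scan loop consumed (limit -= 1 each)
	surviving := 4 // points left after filtering in sendBatch
	limit -= scanned
	dropped := scanned - surviving
	limit += dropped   // as in: limit += dropped after sendBatch
	fmt.Println(limit) // 6: only the 4 surviving points used up the limit
}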
Code example #10
File: engine.go Project: nvdnkpr/influxdb
func (self *QueryEngine) executeCountQueryWithGroupBy(user common.User, database string, query *parser.Query,
	yield func(*protocol.Series) error) error {
	duration, err := query.GetGroupByClause().GetGroupByTime()
	if err != nil {
		return err
	}

	aggregators := []Aggregator{}
	for _, value := range query.GetColumnNames() {
		if value.IsFunctionCall() {
			lowerCaseName := strings.ToLower(value.Name)
			initializer := registeredAggregators[lowerCaseName]
			if initializer == nil {
				return common.NewQueryError(common.InvalidArgument, fmt.Sprintf("Unknown function %s", value.Name))
			}
			aggregator, err := initializer(query, value)
			if err != nil {
				return err
			}
			aggregators = append(aggregators, aggregator)
		}
	}
	timestampAggregator, err := NewTimestampAggregator(query, nil)
	if err != nil {
		return err
	}

	groups := make(map[string]map[interface{}]bool)
	groupBy := query.GetGroupByClause()

	var inverse InverseMapper

	err = self.distributeQuery(user, database, query, func(series *protocol.Series) error {
		var mapper Mapper
		mapper, inverse, err = createValuesToInterface(groupBy, series.Fields)
		if err != nil {
			return err
		}

		for _, aggregator := range aggregators {
			if err := aggregator.InitializeFieldsMetadata(series); err != nil {
				return err
			}
		}

		for _, point := range series.Points {
			value := mapper(point)
			for _, aggregator := range aggregators {
				aggregator.AggregatePoint(*series.Name, value, point)
			}

			timestampAggregator.AggregatePoint(*series.Name, value, point)
			seriesGroups := groups[*series.Name]
			if seriesGroups == nil {
				seriesGroups = make(map[interface{}]bool)
				groups[*series.Name] = seriesGroups
			}
			seriesGroups[value] = true
		}

		return nil
	})

	if err != nil {
		return err
	}

	var sequenceNumber uint32 = 1
	fields := []string{}

	for _, aggregator := range aggregators {
		columnName := aggregator.ColumnName()
		fields = append(fields, columnName)
	}

	for _, value := range groupBy {
		if value.IsFunctionCall() {
			continue
		}

		tempName := value.Name
		fields = append(fields, tempName)
	}

	for table, tableGroups := range groups {
		tempTable := table
		points := []*protocol.Point{}
		for groupId := range tableGroups {
			timestamp := *timestampAggregator.GetValue(table, groupId)[0].Int64Value
			values := [][]*protocol.FieldValue{}

			for _, aggregator := range aggregators {
				values = append(values, aggregator.GetValue(table, groupId))
			}

			// do cross product of all the values
			values = crossProduct(values)

			for _, v := range values {
				point := &protocol.Point{
					SequenceNumber: &sequenceNumber,
					Values:         v,
				}
				point.SetTimestampInMicroseconds(timestamp)

				// FIXME: this should be looking at the fields slice not the group by clause
				// FIXME: we should check whether the selected columns are in the group by clause
				for idx := range groupBy {
					if duration != nil && idx == 0 {
						continue
					}

					value := inverse(groupId, idx)

					switch x := value.(type) {
					case string:
						point.Values = append(point.Values, &protocol.FieldValue{StringValue: &x})
					case bool:
						point.Values = append(point.Values, &protocol.FieldValue{BoolValue: &x})
					case float64:
						point.Values = append(point.Values, &protocol.FieldValue{DoubleValue: &x})
					case int64:
						point.Values = append(point.Values, &protocol.FieldValue{Int64Value: &x})
					}
				}

				points = append(points, point)
			}
		}
		expectedData := &protocol.Series{
			Name:   &tempTable,
			Fields: fields,
			Points: points,
		}
		if err := yield(expectedData); err != nil {
			return err
		}
	}

	return nil
}
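crossProduct is called above but not shown; given how its result is used (each row becomes one point's Values, with one entry per aggregator), it plausibly expands one candidate slice per aggregator into every combination. A hypothetical stand-in over strings, for shape only:

package main

import "fmt"

// crossProductSketch is a hypothetical stand-in for the crossProduct
// helper used above: given one slice of candidate values per aggregator,
// it returns every combination, one row per combination.
func crossProductSketch(values [][]string) [][]string {
	if len(values) == 0 {
		return [][]string{{}}
	}
	rest := crossProductSketch(values[1:])
	var out [][]string
	for _, v := range values[0] {
		for _, row := range rest {
			out = append(out, append([]string{v}, row...))
		}
	}
	return out
}

func main() {
	fmt.Println(crossProductSketch([][]string{{"a", "b"}, {"x"}}))
	// [[a x] [b x]]
}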