func NewDerivativeAggregator(q *parser.SelectQuery, v *parser.Value, defaultValue *parser.Value) (Aggregator, error) {
	if len(v.Elems) != 1 {
		return nil, common.NewQueryError(common.WrongNumberOfArguments, "function derivative() requires exactly one argument")
	}

	if v.Elems[0].Type == parser.ValueWildcard {
		return nil, common.NewQueryError(common.InvalidArgument, "function derivative() doesn't work with wildcards")
	}

	wrappedDefaultValue, err := wrapDefaultValue(defaultValue)
	if err != nil {
		return nil, err
	}

	da := &DerivativeAggregator{
		AbstractAggregator: AbstractAggregator{
			value: v.Elems[0],
		},
		defaultValue: wrappedDefaultValue,
		alias:        v.Alias,
	}

	da.duration, _, err = q.GetGroupByClause().GetGroupByTime()
	if err != nil {
		return nil, err
	}

	return da, nil
}
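This constructor matches the initializer shape that the aggregation examples below look up through registeredAggregators[lowerCaseName]. A minimal sketch of how it would plausibly be registered under its lower-cased function name; the initializer type and the init()-time registration are assumptions, only the map name comes from the snippets below.

// Sketch only: the initializer type and init()-time registration are assumed;
// the registeredAggregators map name is taken from the aggregation examples.
type AggregatorInitializer func(*parser.SelectQuery, *parser.Value, *parser.Value) (Aggregator, error)

var registeredAggregators = make(map[string]AggregatorInitializer)

func init() {
	// The engine lower-cases the function name before the lookup,
	// so register the lower-case key.
	registeredAggregators["derivative"] = NewDerivativeAggregator
}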
Example #2
func containsArithmeticOperators(query *parser.SelectQuery) bool {
	for _, column := range query.GetColumnNames() {
		if column.Type == parser.ValueExpression {
			return true
		}
	}
	return false
}
Example #3
func Filter(query *parser.SelectQuery, series *protocol.Series) (*protocol.Series, error) {
	if query.GetWhereCondition() == nil {
		return series, nil
	}

	columns := map[string]struct{}{}
	if query.GetFromClause().Type == parser.FromClauseInnerJoin {
	outer:
		for t, cs := range query.GetResultColumns() {
			for _, c := range cs {
				// if this is a wildcard select, then drop all columns and
				// just use '*'
				if c == "*" {
					columns = make(map[string]struct{}, 1)
					columns[c] = struct{}{}
					break outer
				}
				columns[t.Name+"."+c] = struct{}{}
			}
		}
	} else {
		for _, cs := range query.GetResultColumns() {
			for _, c := range cs {
				columns[c] = struct{}{}
			}
		}
	}

	points := series.Points
	series.Points = nil
	for _, point := range points {
		ok, err := matches(query.GetWhereCondition(), series.Fields, point)

		if err != nil {
			return nil, err
		}

		if ok {
			filterColumns(columns, series.Fields, point)
			series.Points = append(series.Points, point)
		}
	}

	if _, ok := columns["*"]; !ok {
		newFields := []string{}
		for _, f := range series.Fields {
			if _, ok := columns[f]; !ok {
				continue
			}

			newFields = append(newFields, f)
		}
		series.Fields = newFields
	}
	return series, nil
}
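Filter mutates the series it is given: Points is rebuilt to hold only matching points, and Fields may be narrowed to the columns the query references (unless a wildcard is involved). The same pointer is returned, which is why callers can yield either the returned value or the original series. A minimal usage sketch, assuming it sits in the same package with the same imports:

// applyWhere keeps only the points of s that satisfy the WHERE clause of q.
// Sketch only: q is assumed to be a parsed SELECT query and s a series whose
// Fields include the columns the condition refers to.
func applyWhere(q *parser.SelectQuery, s *protocol.Series) (*protocol.Series, error) {
	filtered, err := Filter(q, s)
	if err != nil {
		return nil, err
	}
	// filtered and s point to the same *protocol.Series; Filter edits in place.
	return filtered, nil
}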
Example #4
func (s *RaftServer) runContinuousQuery(db string, query *parser.SelectQuery, start time.Time, end time.Time) {
	adminName := s.clusterConfig.GetClusterAdmins()[0]
	clusterAdmin := s.clusterConfig.GetClusterAdmin(adminName)
	intoClause := query.GetIntoClause()
	targetName := intoClause.Target.Name
	queryString := query.GetQueryStringWithTimesAndNoIntoClause(start, end)

	writer := NewContinuousQueryWriter(s.coordinator, db, targetName, query)
	s.coordinator.RunQuery(clusterAdmin, db, queryString, writer)
}
Example #5
func (self *QueryEngine) executeArithmeticQuery(query *parser.SelectQuery, yield func(*protocol.Series) error) error {

	names := map[string]*parser.Value{}
	for idx, v := range query.GetColumnNames() {
		switch v.Type {
		case parser.ValueSimpleName:
			names[v.Name] = v
		case parser.ValueFunctionCall:
			names[v.Name] = v
		case parser.ValueExpression:
			if v.Alias != "" {
				names[v.Alias] = v
			} else {
				names["expr"+strconv.Itoa(idx)] = v
			}
		}
	}

	return self.distributeQuery(query, func(series *protocol.Series) error {
		if len(series.Points) == 0 {
			return yield(series)
		}

		newSeries := &protocol.Series{
			Name: series.Name,
		}

		// create the new column names
		for name := range names {
			newSeries.Fields = append(newSeries.Fields, name)
		}

		for _, point := range series.Points {
			newPoint := &protocol.Point{
				Timestamp:      point.Timestamp,
				SequenceNumber: point.SequenceNumber,
			}
			for _, field := range newSeries.Fields {
				value := names[field]
				v, err := GetValue(value, series.Fields, point)
				if err != nil {
					log.Error("Error in arithmetic computation: %s", err)
					return err
				}
				newPoint.Values = append(newPoint.Values, v)
			}
			newSeries.Points = append(newSeries.Points, newPoint)
		}

		return yield(newSeries)
	})
}
Example #6
func getJoinYield(query *parser.SelectQuery, yield func(*protocol.Series) error) func(*protocol.Series) error {
	var lastPoint1 *protocol.Point
	var lastFields1 []string
	var lastPoint2 *protocol.Point
	var lastFields2 []string

	table1 := query.GetFromClause().Names[0].GetAlias()
	table2 := query.GetFromClause().Names[1].GetAlias()
	name := table1 + "_join_" + table2

	return mergeYield(table1, table2, false, query.Ascending, func(s *protocol.Series) error {
		if *s.Name == table1 {
			lastPoint1 = s.Points[len(s.Points)-1]
			if lastFields1 == nil {
				for _, f := range s.Fields {
					lastFields1 = append(lastFields1, table1+"."+f)
				}
			}
		}

		if *s.Name == table2 {
			lastPoint2 = s.Points[len(s.Points)-1]
			if lastFields2 == nil {
				for _, f := range s.Fields {
					lastFields2 = append(lastFields2, table2+"."+f)
				}
			}
		}

		if lastPoint1 == nil || lastPoint2 == nil {
			return nil
		}

		newSeries := &protocol.Series{
			Name:   &name,
			Fields: append(lastFields1, lastFields2...),
			Points: []*protocol.Point{
				{
					Values:    append(lastPoint1.Values, lastPoint2.Values...),
					Timestamp: lastPoint2.Timestamp,
				},
			},
		}

		lastPoint1 = nil
		lastPoint2 = nil

		filteredSeries, err := Filter(query, newSeries)
		if err != nil {
			return err
		}
		if len(filteredSeries.Points) > 0 {
			return yield(filteredSeries)
		}
		return nil
	})
}
Example #7
// distribute query and possibly do the merge/join before yielding the points
func (self *QueryEngine) distributeQuery(query *parser.SelectQuery, yield func(*protocol.Series) error) error {
	// see if this is a merge query
	fromClause := query.GetFromClause()
	if fromClause.Type == parser.FromClauseMerge {
		yield = getMergeYield(fromClause.Names[0].Name.Name, fromClause.Names[1].Name.Name, query.Ascending, yield)
	}

	if fromClause.Type == parser.FromClauseInnerJoin {
		yield = getJoinYield(query, yield)
	}

	self.yield = yield
	return nil
}
Example #8
func NewJoinEngine(query *parser.SelectQuery, next Processor) Processor {
	table1 := query.GetFromClause().Names[0].GetAlias()
	table2 := query.GetFromClause().Names[1].GetAlias()
	name := table1 + "_join_" + table2

	joinEngine := &JoinEngine{
		next:   next,
		name:   name,
		table1: table1,
		table2: table2,
		query:  query,
	}
	mergeEngine := NewCommonMergeEngine(table1, table2, false, query.Ascending, joinEngine)
	return mergeEngine
}
Example #9
func (self *QueryEngine) executeCountQueryWithGroupBy(query *parser.SelectQuery, yield func(*protocol.Series) error) error {
	self.aggregateYield = yield
	duration, err := query.GetGroupByClause().GetGroupByTime()
	if err != nil {
		return err
	}

	self.isAggregateQuery = true
	self.duration = duration
	self.aggregators = []Aggregator{}

	for _, value := range query.GetColumnNames() {
		if !value.IsFunctionCall() {
			continue
		}
		lowerCaseName := strings.ToLower(value.Name)
		initializer := registeredAggregators[lowerCaseName]
		if initializer == nil {
			return common.NewQueryError(common.InvalidArgument, fmt.Sprintf("Unknown function %s", value.Name))
		}
		aggregator, err := initializer(query, value, query.GetGroupByClause().FillValue)
		if err != nil {
			return common.NewQueryError(common.InvalidArgument, fmt.Sprintf("%s", err))
		}
		self.aggregators = append(self.aggregators, aggregator)
	}

	for _, elem := range query.GetGroupByClause().Elems {
		if elem.IsFunctionCall() {
			continue
		}
		self.elems = append(self.elems, elem)
	}

	self.fillWithZero = query.GetGroupByClause().FillWithZero

	self.initializeFields()

	err = self.distributeQuery(query, func(series *protocol.Series) error {
		if len(series.Points) == 0 {
			return nil
		}

		return self.aggregateValuesForSeries(series)
	})

	return err
}
Example #10
func NewQueryEngine(next Processor, query *parser.SelectQuery, shards []uint32) (Processor, error) {
	limit := query.Limit

	var engine Processor = NewPassthroughEngineWithLimit(next, 1, limit)

	var err error
	if query.HasAggregates() {
		engine, err = NewAggregatorEngine(query, engine)
	} else if query.ContainsArithmeticOperators() {
		engine, err = NewArithmeticEngine(query, engine)
	}

	fromClause := query.GetFromClause()

	switch fromClause.Type {
	case parser.FromClauseInnerJoin:
		engine = NewJoinEngine(shards, query, engine)
	case parser.FromClauseMerge:
		tables := make([]string, len(fromClause.Names))
		for i, name := range fromClause.Names {
			tables[i] = name.Name.Name
		}
		engine = NewMergeEngine(shards, query.Ascending, engine)
	case parser.FromClauseMergeRegex:
		// At this point the regex should be expanded to the list of
		// tables that will be queried
		panic("QueryEngine cannot be called with merge function")
	}

	if err != nil {
		return nil, err
	}
	return engine, nil
}
Example #11
func NewArithmeticEngine(query *parser.SelectQuery, next Processor) (*ArithmeticEngine, error) {

	names := map[string]*parser.Value{}
	for idx, v := range query.GetColumnNames() {
		switch v.Type {
		case parser.ValueSimpleName:
			names[v.Name] = v
		case parser.ValueFunctionCall:
			names[v.Name] = v
		case parser.ValueExpression:
			if v.Alias != "" {
				names[v.Alias] = v
			} else {
				names["expr"+strconv.Itoa(idx)] = v
			}
		}
	}

	return &ArithmeticEngine{
		next:  next,
		names: names,
	}, nil
}
Example #12
func NewQueryEngine(query *parser.SelectQuery, responseChan chan *protocol.Response) (*QueryEngine, error) {
	limit := query.Limit

	queryEngine := &QueryEngine{
		query:          query,
		where:          query.GetWhereCondition(),
		limiter:        NewLimiter(limit),
		responseChan:   responseChan,
		seriesToPoints: make(map[string]*protocol.Series),
		// stats stuff
		explain:       query.IsExplainQuery(),
		runStartTime:  0,
		runEndTime:    0,
		pointsRead:    0,
		pointsWritten: 0,
		shardId:       0,
		shardLocal:    false, // only meaningful for EXPLAIN queries
		duration:      nil,
		seriesStates:  make(map[string]*SeriesState),
	}

	if queryEngine.explain {
		queryEngine.runStartTime = float64(time.Now().UnixNano()) / float64(time.Millisecond)
	}

	yield := func(series *protocol.Series) error {
		var response *protocol.Response

		queryEngine.limiter.calculateLimitAndSlicePoints(series)
		if len(series.Points) == 0 {
			return nil
		}
		if queryEngine.explain {
			//TODO: We may not have to send points, just count them
			queryEngine.pointsWritten += int64(len(series.Points))
		}
		response = &protocol.Response{Type: &queryResponse, Series: series}
		responseChan <- response
		return nil
	}

	var err error
	if query.HasAggregates() {
		err = queryEngine.executeCountQueryWithGroupBy(query, yield)
	} else if containsArithmeticOperators(query) {
		err = queryEngine.executeArithmeticQuery(query, yield)
	} else {
		err = queryEngine.distributeQuery(query, yield)
	}

	if err != nil {
		return nil, err
	}
	return queryEngine, nil
}
Example #13
func (s *RaftServer) runContinuousQuery(db string, query *parser.SelectQuery, start time.Time, end time.Time) {
	adminName := s.clusterConfig.GetClusterAdmins()[0]
	clusterAdmin := s.clusterConfig.GetClusterAdmin(adminName)
	intoClause := query.GetIntoClause()
	targetName := intoClause.Target.Name
	queryString := query.GetQueryStringWithTimesAndNoIntoClause(start, end)

	f := func(series *protocol.Series) error {
		return s.coordinator.InterpolateValuesAndCommit(query.GetQueryString(), db, series, targetName, true)
	}

	writer := NewContinuousQueryWriter(f)
	s.coordinator.RunQuery(clusterAdmin, db, queryString, writer)
}
Example #14
// Create and return a new JoinEngine given the shards that will be
// processed and the query
func NewJoinEngine(shards []uint32, query *parser.SelectQuery, next Processor) Processor {
	tableNames := query.GetFromClause().Names
	name := query.GetFromClause().GetString()
	log4go.Debug("NewJoinEngine: shards=%v, query=%s, next=%s, tableNames=%v, name=%s",
		shards, query.GetQueryString(), next.Name(), tableNames, name)

	joinEngine := &JoinEngine{
		next:        next,
		name:        name,
		tablesState: make([]joinEngineState, len(tableNames)),
		tableIdx:    make(map[string]int, len(tableNames)),
		query:       query,
		pts:         0,
	}

	for i, tn := range tableNames {
		alias := tn.GetAlias()
		joinEngine.tablesState[i] = joinEngineState{}
		joinEngine.tableIdx[alias] = i
	}

	mergeEngine := NewCommonMergeEngine(shards, false, query.Ascending, joinEngine)
	return mergeEngine
}
Example #15
File: engine.go Project: aiyi/influxdb
func NewQueryEngine(next Processor, query *parser.SelectQuery) (Processor, error) {
	limit := query.Limit

	var engine Processor = NewPassthroughEngineWithLimit(next, 1, limit)

	var err error
	if query.HasAggregates() {
		engine, err = NewAggregatorEngine(query, engine)
	} else if query.ContainsArithmeticOperators() {
		engine, err = NewArithmeticEngine(query, engine)
	}

	fromClause := query.GetFromClause()
	if fromClause.Type == parser.FromClauseMerge {
		engine = NewMergeEngine(fromClause.Names[0].Name.Name, fromClause.Names[1].Name.Name, query.Ascending, engine)
	} else if fromClause.Type == parser.FromClauseInnerJoin {
		engine = NewJoinEngine(query, engine)
	}

	if err != nil {
		return nil, err
	}
	return engine, nil
}
Example #16
func NewFilteringEngine(query *parser.SelectQuery, processor Processor) *FilteringEngine {
	shouldFilter := query.GetWhereCondition() != nil
	return &FilteringEngine{query, processor, shouldFilter}
}
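The positional struct literal implies a three-field struct roughly like the sketch below. The field names, the YieldSeries method, and its signature are assumptions inferred from how Filter and the Processor chain are used elsewhere in these examples, not copied from the project:

// Assumed shape; only the constructor above comes from the source.
type FilteringEngine struct {
	query        *parser.SelectQuery
	processor    Processor
	shouldFilter bool
}

// A plausible yield path: pass the series straight through when the query has
// no WHERE clause, otherwise run it through Filter first. The YieldSeries name
// and (bool, error) signature are assumptions about the Processor interface.
func (self *FilteringEngine) YieldSeries(series *protocol.Series) (bool, error) {
	if !self.shouldFilter {
		return self.processor.YieldSeries(series)
	}
	filtered, err := Filter(self.query, series)
	if err != nil {
		return false, err
	}
	return self.processor.YieldSeries(filtered)
}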
Example #17
func (self *QueryEngine) executeCountQueryWithGroupBy(query *parser.SelectQuery, yield func(*protocol.Series) error) error {
	self.aggregateYield = yield
	duration, err := query.GetGroupByClause().GetGroupByTime()
	if err != nil {
		return err
	}

	self.isAggregateQuery = true
	self.duration = duration
	self.aggregators = []Aggregator{}

	for _, value := range query.GetColumnNames() {
		if !value.IsFunctionCall() {
			continue
		}
		lowerCaseName := strings.ToLower(value.Name)
		initializer := registeredAggregators[lowerCaseName]
		if initializer == nil {
			return common.NewQueryError(common.InvalidArgument, fmt.Sprintf("Unknown function %s", value.Name))
		}
		aggregator, err := initializer(query, value, query.GetGroupByClause().FillValue)
		if err != nil {
			return common.NewQueryError(common.InvalidArgument, fmt.Sprintf("%s", err))
		}
		self.aggregators = append(self.aggregators, aggregator)
	}

	for _, elem := range query.GetGroupByClause().Elems {
		if elem.IsFunctionCall() {
			continue
		}
		self.elems = append(self.elems, elem)
	}

	self.fillWithZero = query.GetGroupByClause().FillWithZero

	// This is a special case for issue #426. If the start time is
	// specified and there's a group by clause and fill with zero, then
	// we need to fill the entire range from start time to end time
	if query.IsStartTimeSpecified() && self.duration != nil && self.fillWithZero {
		self.startTimeSpecified = true
		self.startTime = query.GetStartTime().Truncate(*self.duration).UnixNano() / 1000
		self.endTime = query.GetEndTime().Truncate(*self.duration).UnixNano() / 1000
	}

	self.initializeFields()

	err = self.distributeQuery(query, func(series *protocol.Series) error {
		if len(series.Points) == 0 {
			return nil
		}

		return self.aggregateValuesForSeries(series)
	})

	return err
}
Example #18
func NewAggregatorEngine(query *parser.SelectQuery, next Processor) (*AggregatorEngine, error) {
	ae := &AggregatorEngine{
		next:         next,
		seriesStates: make(map[string]*SeriesState),
		ascending:    query.Ascending,
	}

	var err error
	ae.duration, ae.irregularInterval, err = query.GetGroupByClause().GetGroupByTime()
	if err != nil {
		return nil, err
	}

	ae.aggregators = []Aggregator{}

	for _, value := range query.GetColumnNames() {
		if !value.IsFunctionCall() {
			continue
		}
		lowerCaseName := strings.ToLower(value.Name)
		initializer := registeredAggregators[lowerCaseName]
		if initializer == nil {
			return nil, common.NewQueryError(common.InvalidArgument, fmt.Sprintf("Unknown function %s", value.Name))
		}
		aggregator, err := initializer(query, value, query.GetGroupByClause().FillValue)
		if err != nil {
			return nil, common.NewQueryError(common.InvalidArgument, fmt.Sprintf("%s", err))
		}
		ae.aggregators = append(ae.aggregators, aggregator)
	}

	for _, elem := range query.GetGroupByClause().Elems {
		if elem.IsFunctionCall() {
			continue
		}
		ae.elems = append(ae.elems, elem)
	}

	ae.isFillQuery = query.GetGroupByClause().FillWithZero

	// This is a special case for issue #426. If the start time is
	// specified and there's a group by clause and fill with zero, then
	// we need to fill the entire range from start time to end time
	if query.IsStartTimeSpecified() && ae.duration != nil && ae.isFillQuery {
		ae.startTimeSpecified = true
		ae.startTime = query.GetStartTime().Truncate(*ae.duration).UnixNano() / 1000
		ae.endTime = query.GetEndTime().Truncate(*ae.duration).UnixNano() / 1000
	}

	ae.initializeFields()

	return ae, nil
}
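The issue #426 branch truncates the query's start and end times to the group-by interval and stores them as microsecond timestamps (UnixNano() / 1000). A self-contained illustration of that arithmetic, using an assumed 10-minute interval and start time:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed example values; the engine reads both from the parsed query.
	interval := 10 * time.Minute
	start := time.Date(2014, 3, 1, 12, 34, 56, 0, time.UTC)

	// Same arithmetic as ae.startTime above: truncate to the interval
	// boundary, then convert nanoseconds to microseconds.
	bucketStart := start.Truncate(interval).UnixNano() / 1000
	fmt.Println(bucketStart) // 2014-03-01 12:30:00 UTC in microseconds
}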