// YieldSeries records the incoming series (tracking points read for
// explain queries) and forwards it to yieldSeriesData. Returns false
// once the limiter reports that the limit for this series was hit.
func (self *QueryEngine) YieldSeries(seriesIncoming *protocol.Series) (shouldContinue bool) {
	if self.explain {
		self.pointsRead += int64(len(seriesIncoming.Points))
	}
	seriesName := seriesIncoming.GetName()
	self.seriesToPoints[seriesName] = &protocol.Series{Name: &seriesName, Fields: seriesIncoming.Fields}
	return self.yieldSeriesData(seriesIncoming) && !self.limiter.hitLimit(seriesIncoming.GetName())
}
// MergeSeries merges two time series, making sure that the resulting
// series has the union of the two series' columns and the values set
// properly. Panics if the two series don't have the same name.
func MergeSeries(s1, s2 *protocol.Series) *protocol.Series {
	if s1.GetName() != s2.GetName() {
		panic("the two series don't have the same name")
	}

	// if the two series have the same columns in the same order,
	// append the points and return
	if reflect.DeepEqual(s1.Fields, s2.Fields) {
		s1.Points = append(s1.Points, s2.Points...)
		return s1
	}

	// otherwise, merge the columns by taking the union of the two
	// column sets
	columns := map[string]struct{}{}
	for _, cs := range [][]string{s1.Fields, s2.Fields} {
		for _, c := range cs {
			columns[c] = struct{}{}
		}
	}

	points := append(pointMaps(s1), pointMaps(s2)...)

	fieldsSlice := make([]string, 0, len(columns))
	for c := range columns {
		fieldsSlice = append(fieldsSlice, c)
	}

	resultPoints := make([]*protocol.Point, 0, len(points))
	for idx, point := range points {
		resultPoint := &protocol.Point{}
		for _, field := range fieldsSlice {
			value := point[field]
			if value == nil {
				// columns missing from one of the series are filled
				// with null values
				value = &protocol.FieldValue{
					IsNull: &TRUE,
				}
			}
			resultPoint.Values = append(resultPoint.Values, value)
		}
		// the timestamp and sequence number come from whichever series
		// the point originated in; this needs to happen once per point,
		// not once per field
		if idx < len(s1.Points) {
			resultPoint.Timestamp = s1.Points[idx].Timestamp
			resultPoint.SequenceNumber = s1.Points[idx].SequenceNumber
		} else {
			resultPoint.Timestamp = s2.Points[idx-len(s1.Points)].Timestamp
			resultPoint.SequenceNumber = s2.Points[idx-len(s1.Points)].SequenceNumber
		}
		resultPoints = append(resultPoints, resultPoint)
	}

	return &protocol.Series{
		Name:   s1.Name,
		Fields: fieldsSlice,
		Points: resultPoints,
	}
}
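// A minimal usage sketch of MergeSeries with differing column sets; the
// series names, values, and the StringValue field are illustrative
// assumptions, not taken from this file. Each resulting point gets a
// null placeholder for the column its source series lacked.
func ExampleMergeSeries() {
	name := "cpu"
	ts1, ts2 := int64(1), int64(2)
	seq := uint64(1)
	ten := int64(10)
	host := "a"
	s1 := &protocol.Series{
		Name:   &name,
		Fields: []string{"value"},
		Points: []*protocol.Point{{
			Timestamp:      &ts1,
			SequenceNumber: &seq,
			Values:         []*protocol.FieldValue{{Int64Value: &ten}},
		}},
	}
	s2 := &protocol.Series{
		Name:   &name,
		Fields: []string{"host"},
		Points: []*protocol.Point{{
			Timestamp:      &ts2,
			SequenceNumber: &seq,
			Values:         []*protocol.FieldValue{{StringValue: &host}},
		}},
	}
	merged := MergeSeries(s1, s2)
	// merged.Fields is the union of {"value", "host"} (built from a map,
	// so the order is not deterministic); merged.Points holds both
	// points, with IsNull set for each missing column.
	_ = merged
}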
// yield buffers a series in memory, merging it with any previously
// yielded series of the same name.
func (self *AllPointsWriter) yield(series *protocol.Series) error {
	oldSeries := self.memSeries[series.GetName()]
	if oldSeries == nil {
		self.memSeries[series.GetName()] = series
		return nil
	}

	self.memSeries[series.GetName()] = MergeSeries(oldSeries, series)
	return nil
}
func (self *PassthroughEngine) YieldSeries(seriesIncoming *protocol.Series) bool {
	log.Debug("PassthroughEngine YieldSeries %d", len(seriesIncoming.Points))
	if *seriesIncoming.Name == "explain query" {
		self.responseType = &explainQueryResponse
		log.Debug("switching to explain query response type")
	} else {
		self.responseType = &queryResponse
	}

	self.limiter.calculateLimitAndSlicePoints(seriesIncoming)
	if len(seriesIncoming.Points) == 0 {
		log.Error("not sending response: no points left after applying the limit")
		return false
	}

	if self.response == nil {
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else if self.response.Series.GetName() != seriesIncoming.GetName() ||
		len(self.response.Series.Points) > self.maxPointsInResponse {
		// flush the buffered response when the series changes or the
		// response grows past the configured size, then start a new one
		self.responseChan <- self.response
		self.response = &protocol.Response{
			Type:   self.responseType,
			Series: seriesIncoming,
		}
	} else {
		self.response.Series.Points = append(self.response.Series.Points, seriesIncoming.Points...)
	}

	return !self.limiter.hitLimit(seriesIncoming.GetName())
}
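// A self-contained sketch of the batching rule YieldSeries applies above:
// accumulate points for one series and flush whenever the series name
// changes or the batch outgrows a size cap. batcher and its fields are
// hypothetical stand-ins for the response/responseChan machinery, not
// part of this file.
type batcher struct {
	name  string
	count int
	max   int
	flush func(name string, count int)
}

func (b *batcher) add(name string, points int) {
	// flush before accepting points for a different series, or when the
	// accumulated batch has grown past the cap
	if b.count > 0 && (name != b.name || b.count > b.max) {
		b.flush(b.name, b.count)
		b.count = 0
	}
	b.name = name
	b.count += points
}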
// We have three types of queries:
//   1. time() without fill
//   2. time() with fill
//   3. no time()
//
// For (1) we flush as soon as a new bucket starts; the prefix tree
// keeps track of the other group-by columns without the time bucket.
// We reset the trie once the series is yielded. For (2), we keep track
// of all group-by columns, with time being the last level in the
// prefix tree. At the end of the query we step through [start time,
// end time] in self.duration steps and get the state from the prefix
// tree, using default values for groups without state in the prefix
// tree. For the last case we keep the groups in the prefix tree, and
// on close() we loop through the groups and flush their values with a
// timestamp equal to now().
func (self *QueryEngine) aggregateValuesForSeries(series *protocol.Series) error {
	for _, aggregator := range self.aggregators {
		if err := aggregator.InitializeFieldsMetadata(series); err != nil {
			return err
		}
	}

	seriesState := self.getSeriesState(series.GetName())
	currentRange := seriesState.pointsRange

	includeTimestampInGroup := self.duration != nil && self.fillWithZero
	var group []*protocol.FieldValue
	if !includeTimestampInGroup {
		group = make([]*protocol.FieldValue, len(self.elems))
	} else {
		// reserve an extra slot for the time bucket
		group = make([]*protocol.FieldValue, len(self.elems)+1)
	}

	for _, point := range series.Points {
		currentRange.UpdateRange(point)

		// this is a group by with time() and no fill, flush as soon as
		// we start a new bucket
		if self.duration != nil && !self.fillWithZero {
			timestamp := self.getTimestampFromPoint(point)
			// this is the timestamp aggregator
			if seriesState.started && seriesState.lastTimestamp != timestamp {
				self.runAggregatesForTable(series.GetName())
			}
			seriesState.lastTimestamp = timestamp
			seriesState.started = true
		}

		// get the group this point belongs to
		for idx, elem := range self.elems {
			// TODO: create an index from fieldname to index
			value, err := GetValue(elem, series.Fields, point)
			if err != nil {
				return err
			}
			group[idx] = value
		}

		// if this is a fill() query, add the timestamp at the end
		if includeTimestampInGroup {
			timestamp := self.getTimestampFromPoint(point)
			group[len(self.elems)] = &protocol.FieldValue{Int64Value: protocol.Int64(timestamp)}
		}

		// update the state of the given group
		node := seriesState.trie.GetNode(group)
		var err error
		for idx, aggregator := range self.aggregators {
			node.states[idx], err = aggregator.AggregatePoint(node.states[idx], point)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
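// The comment above describes stepping through [start time, end time] in
// duration-sized steps for fill() queries, substituting default values
// for buckets the prefix tree has no state for. Below is a minimal,
// self-contained sketch of that walk; bucketState, lookup, and emit are
// illustrative stand-ins for the trie state and yield path in this
// engine, not part of this file.
type bucketState struct {
	count int64 // placeholder aggregator state
}

func fillBuckets(start, end, step int64,
	lookup func(ts int64) (bucketState, bool),
	emit func(ts int64, s bucketState)) {
	for ts := start; ts < end; ts += step {
		s, ok := lookup(ts)
		if !ok {
			// groups with no state in the trie get default values
			s = bucketState{}
		}
		emit(ts, s)
	}
}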