func createResponse(nextPointMap map[string]*NextPoint, series *protocol.Series, id *uint32) *protocol.Response {
	pointCount := len(series.Points)
	if pointCount < 1 {
		// nothing new in this batch; emit any point that was held back for this
		// series on a previous call
		if nextPoint := nextPointMap[*series.Name]; nextPoint != nil {
			series.Points = append(series.Points, nextPoint.point)
			series.Fields = nextPoint.fields
		}
		return &protocol.Response{Type: &queryResponse, Series: series, RequestId: id}
	}

	// hold back the final point of this batch so it can lead the next response;
	// its timestamp is advertised via NextPointTime
	oldNextPoint := nextPointMap[*series.Name]
	nextPoint := series.Points[pointCount-1]
	series.Points[pointCount-1] = nil
	if oldNextPoint != nil {
		// shift everything right and put the previously held point at the front
		copy(series.Points[1:], series.Points[0:])
		series.Points[0] = oldNextPoint.point
	} else {
		series.Points = series.Points[:len(series.Points)-1]
	}

	response := &protocol.Response{Series: series, Type: &queryResponse, RequestId: id}
	if nextPoint != nil {
		response.NextPointTime = nextPoint.Timestamp
		nextPointMap[*series.Name] = &NextPoint{series.Fields, nextPoint}
	}
	return response
}
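// Hedged usage sketch (not from the original source): it assumes this sits in the
// same package as createResponse, where NextPoint and queryResponse are defined,
// and that the protocol types look as they do above. It illustrates how the final
// point of each batch is withheld in nextPointMap and re-emitted at the front of
// the next response for the same series name.
func exampleCreateResponseCarryOver(batches []*protocol.Series, id *uint32) []*protocol.Response {
	nextPointMap := map[string]*NextPoint{}
	responses := make([]*protocol.Response, 0, len(batches))
	for _, batch := range batches {
		// each response holds back the batch's final point; that point leads the
		// next response produced for this series
		responses = append(responses, createResponse(nextPointMap, batch, id))
	}
	return responses
}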
func Filter(query *parser.SelectQuery, series *protocol.Series) (*protocol.Series, error) {
	if query.GetWhereCondition() == nil {
		return series, nil
	}

	columns := map[string]struct{}{}
	if query.GetFromClause().Type == parser.FromClauseInnerJoin {
	outer:
		for t, cs := range query.GetResultColumns() {
			for _, c := range cs {
				// if this is a wildcard select, then drop all columns and
				// just use '*'
				if c == "*" {
					columns = make(map[string]struct{}, 1)
					columns[c] = struct{}{}
					break outer
				}
				columns[t.Name+"."+c] = struct{}{}
			}
		}
	} else {
		for _, cs := range query.GetResultColumns() {
			for _, c := range cs {
				columns[c] = struct{}{}
			}
		}
	}

	points := series.Points
	series.Points = nil
	for _, point := range points {
		ok, err := matches(query.GetWhereCondition(), series.Fields, point)
		if err != nil {
			return nil, err
		}

		if ok {
			filterColumns(columns, series.Fields, point)
			series.Points = append(series.Points, point)
		}
	}

	if _, ok := columns["*"]; !ok {
		newFields := []string{}
		for _, f := range series.Fields {
			if _, ok := columns[f]; !ok {
				continue
			}
			newFields = append(newFields, f)
		}
		series.Fields = newFields
	}
	return series, nil
}
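// Simplified sketch of the column-set construction above for the inner-join case,
// using a plain map instead of the parser types (the tableColumns parameter and
// its contents are hypothetical). Columns are qualified as "<table>.<column>"
// unless a wildcard is selected, in which case the set collapses to just "*".
func exampleJoinColumnSet(tableColumns map[string][]string) map[string]struct{} {
	columns := map[string]struct{}{}
outer:
	for table, cs := range tableColumns {
		for _, c := range cs {
			if c == "*" {
				columns = map[string]struct{}{"*": {}}
				break outer
			}
			columns[table+"."+c] = struct{}{}
		}
	}
	return columns
}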
func (self *CoordinatorImpl) CommitSeriesData(db string, series *protocol.Series) error {
	lastPointIndex := 0
	now := common.CurrentTime()
	var shardToWrite cluster.Shard
	for _, point := range series.Points {
		if point.Timestamp == nil {
			point.Timestamp = &now
		}
	}

	lastTime := int64(math.MinInt64)
	if len(series.Points) > 0 && *series.Points[0].Timestamp == lastTime {
		// just a hack to make sure lastTime will never equal the first
		// point's timestamp
		lastTime = 0
	}

	// sort the points by timestamp
	series.SortPointsTimeDescending()

	for i, point := range series.Points {
		if *point.Timestamp != lastTime {
			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *point.Timestamp)
			if err != nil {
				return err
			}
			if shardToWrite == nil {
				shardToWrite = shard
			} else if shardToWrite.Id() != shard.Id() {
				// the target shard changed, so flush the points accumulated so far
				// to the previous shard before starting a new slice
				newIndex := i
				newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[lastPointIndex:newIndex]}
				if err := self.write(db, newSeries, shardToWrite); err != nil {
					return err
				}
				lastPointIndex = newIndex
				shardToWrite = shard
			}
			lastTime = *point.Timestamp
		}
	}

	series.Points = series.Points[lastPointIndex:]

	if len(series.Points) > 0 {
		if shardToWrite == nil {
			shardToWrite, _ = self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *series.Points[0].Timestamp)
		}

		err := self.write(db, series, shardToWrite)
		if err != nil {
			log.Error("COORD error writing: ", err)
			return err
		}
		return nil
	}

	return nil
}
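// Simplified, hypothetical sketch of the shard-splitting idea in CommitSeriesData:
// given timestamps already in write order and a shardForTime lookup (a stand-in
// for GetShardToWriteToBySeriesAndTime), consecutive timestamps that map to the
// same shard id are grouped into a single write. None of these names come from
// the original code.
func exampleSplitByShard(timestamps []int64, shardForTime func(int64) uint32) [][]int64 {
	var batches [][]int64
	start := 0
	for i := 1; i < len(timestamps); i++ {
		if shardForTime(timestamps[i]) != shardForTime(timestamps[start]) {
			// shard boundary: everything accumulated so far becomes one write
			batches = append(batches, timestamps[start:i])
			start = i
		}
	}
	if len(timestamps) > 0 {
		batches = append(batches, timestamps[start:])
	}
	return batches
}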
func (self *Limiter) calculateLimitAndSlicePoints(series *protocol.Series) {
	if self.shouldLimit {
		// if the limit is 0, stop returning any points
		limit := self.limitForSeries(*series.Name)
		defer func() { self.limits[*series.Name] = limit }()
		if limit == 0 {
			series.Points = nil
			return
		}
		limit -= len(series.Points)
		if limit <= 0 {
			sliceTo := len(series.Points) + limit
			series.Points = series.Points[0:sliceTo]
			limit = 0
		}
	}
}
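// Hedged sketch of the slicing arithmetic in calculateLimitAndSlicePoints: given a
// per-series remaining limit and a batch size, it returns how many points of the
// batch to keep and the new remaining limit. The function and parameter names here
// are illustrative, not part of the original API.
func exampleApplyLimit(remaining, batchSize int) (keep, newRemaining int) {
	if remaining == 0 {
		// limit already exhausted: drop the whole batch
		return 0, 0
	}
	newRemaining = remaining - batchSize
	if newRemaining <= 0 {
		// keep only enough points to hit the limit exactly
		return batchSize + newRemaining, 0
	}
	return batchSize, newRemaining
}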
// merges two time series making sure that the resulting series has
// the union of the two series columns and the values set
// properly. will panic if the two series don't have the same name
func MergeSeries(s1, s2 *protocol.Series) *protocol.Series {
	if s1.GetName() != s2.GetName() {
		panic("the two series don't have the same name")
	}

	// if the two series have the same columns and in the same order
	// append the points and return.
	if reflect.DeepEqual(s1.Fields, s2.Fields) {
		s1.Points = append(s1.Points, s2.Points...)
		return s1
	}

	// otherwise, merge the columns
	columns := map[string]struct{}{}
	for _, cs := range [][]string{s1.Fields, s2.Fields} {
		for _, c := range cs {
			columns[c] = struct{}{}
		}
	}

	points := append(pointMaps(s1), pointMaps(s2)...)

	fieldsSlice := make([]string, 0, len(columns))
	for c := range columns {
		fieldsSlice = append(fieldsSlice, c)
	}

	resultPoints := make([]*protocol.Point, 0, len(points))
	for idx, point := range points {
		resultPoint := &protocol.Point{}
		for _, field := range fieldsSlice {
			value := point[field]
			if value == nil {
				value = &protocol.FieldValue{
					IsNull: &TRUE,
				}
			}
			resultPoint.Values = append(resultPoint.Values, value)
		}
		// carry over the timestamp and sequence number from whichever series
		// this point came from
		if idx < len(s1.Points) {
			resultPoint.Timestamp = s1.Points[idx].Timestamp
			resultPoint.SequenceNumber = s1.Points[idx].SequenceNumber
		} else {
			resultPoint.Timestamp = s2.Points[idx-len(s1.Points)].Timestamp
			resultPoint.SequenceNumber = s2.Points[idx-len(s1.Points)].SequenceNumber
		}
		resultPoints = append(resultPoints, resultPoint)
	}

	result := &protocol.Series{
		Name:   s1.Name,
		Fields: fieldsSlice,
		Points: resultPoints,
	}
	return result
}
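// Hedged example of calling MergeSeries (it assumes protocol.FieldValue carries an
// Int64Value field alongside the IsNull field used above; the series name and
// values are made up). s1 only has column "a" and s2 only has column "b", so the
// merged series carries both columns and fills the missing one on each point with
// a null value.
func exampleMergeSeries() *protocol.Series {
	name := "events"
	ts := int64(1000)
	seq := uint64(1)
	one := int64(1)
	two := int64(2)

	s1 := &protocol.Series{
		Name:   &name,
		Fields: []string{"a"},
		Points: []*protocol.Point{
			{Values: []*protocol.FieldValue{{Int64Value: &one}}, Timestamp: &ts, SequenceNumber: &seq},
		},
	}
	s2 := &protocol.Series{
		Name:   &name,
		Fields: []string{"b"},
		Points: []*protocol.Point{
			{Values: []*protocol.FieldValue{{Int64Value: &two}}, Timestamp: &ts, SequenceNumber: &seq},
		},
	}

	// merged.Fields is the union {"a", "b"} (in map-iteration order) and every
	// point has a value for every field, with IsNull set where a column was absent
	merged := MergeSeries(s1, s2)
	return merged
}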
func Filter(query *parser.SelectQuery, series *protocol.Series) (*protocol.Series, error) {
	if query.GetWhereCondition() == nil {
		return series, nil
	}

	columns := map[string]bool{}
	getColumns(query.GetColumnNames(), columns)
	getColumns(query.GetGroupByClause().Elems, columns)

	points := series.Points
	series.Points = nil
	for _, point := range points {
		ok, err := matches(query.GetWhereCondition(), series.Fields, point)
		if err != nil {
			return nil, err
		}

		if ok {
			filterColumns(columns, series.Fields, point)
			series.Points = append(series.Points, point)
		}
	}

	if !columns["*"] {
		newFields := []string{}
		for _, f := range series.Fields {
			if _, ok := columns[f]; !ok {
				continue
			}
			newFields = append(newFields, f)
		}
		series.Fields = newFields
	}
	return series, nil
}
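// Simplified, hypothetical sketch of the projection step at the end of this Filter
// variant: once the selected column set is known, the series' field list is
// trimmed to the selected columns unless a wildcard was requested.
func exampleProjectFields(fields []string, selected map[string]bool) []string {
	if selected["*"] {
		// wildcard keeps every field
		return fields
	}
	kept := make([]string, 0, len(fields))
	for _, f := range fields {
		if selected[f] {
			kept = append(kept, f)
		}
	}
	return kept
}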
func (self *CoordinatorImpl) normalizePointAndAppend(fieldNames map[string]int, result *protocol.Series, fields []string, point *protocol.Point) {
	oldValues := point.Values
	point.Values = make([]*protocol.FieldValue, len(fieldNames), len(fieldNames))
	for index, field := range fields {
		indexForField, ok := fieldNames[field]
		// drop this point on the floor if the unexpected happens
		if !ok {
			log.Error("Couldn't lookup field: ", field, fields, fieldNames)
			return
		}
		point.Values[indexForField] = oldValues[index]
	}
	result.Points = append(result.Points, point)
}
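// Hedged illustration of what normalizePointAndAppend does to a point's values: a
// server that reported fields in the order ["b", "a"] has its values rearranged
// into the merged field order captured by fieldNames (here "a"->0, "b"->1, "c"->2);
// slots for fields the server never reported stay nil. The field names and the
// Int64Value field of protocol.FieldValue are assumptions for the example.
func exampleNormalizeOrder() []*protocol.FieldValue {
	fieldNames := map[string]int{"a": 0, "b": 1, "c": 2}
	serverFields := []string{"b", "a"}
	x := int64(10)
	y := int64(20)
	oldValues := []*protocol.FieldValue{{Int64Value: &x}, {Int64Value: &y}}

	normalized := make([]*protocol.FieldValue, len(fieldNames))
	for i, f := range serverFields {
		normalized[fieldNames[f]] = oldValues[i]
	}
	// normalized is [y (field "a"), x (field "b"), nil (field "c")]
	return normalized
}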
func (self *CoordinatorImpl) CommitSeriesData(db string, series *protocol.Series) error {
	lastTime := int64(0)
	lastPointIndex := 0
	now := common.CurrentTime()
	var shardToWrite cluster.Shard
	for i, point := range series.Points {
		if point.Timestamp == nil {
			point.Timestamp = &now
		}
		if *point.Timestamp != lastTime {
			shard, err := self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *point.Timestamp)
			if err != nil {
				return err
			}
			if shardToWrite == nil {
				shardToWrite = shard
			} else if shardToWrite.Id() != shard.Id() {
				newIndex := i + 1
				newSeries := &protocol.Series{Name: series.Name, Fields: series.Fields, Points: series.Points[lastPointIndex:newIndex]}
				// don't silently drop a failed write to the previous shard
				if err := self.write(db, newSeries, shardToWrite); err != nil {
					return err
				}
				lastPointIndex = newIndex
				shardToWrite = shard
			}
			lastTime = *point.Timestamp
		}
	}

	series.Points = series.Points[lastPointIndex:]

	if len(series.Points) > 0 {
		if shardToWrite == nil {
			shardToWrite, _ = self.clusterConfiguration.GetShardToWriteToBySeriesAndTime(db, *series.Name, *series.Points[0].Timestamp)
		}

		err := self.write(db, series, shardToWrite)
		if err != nil {
			log.Error("COORD error writing: ", err)
		}
		return err
	}

	return nil
}
// TODO: refactor this for clarity. This got super ugly...
// Function yields all results that are safe to do so ensuring order. Returns all results that must wait for more from the servers.
func (self *CoordinatorImpl) yieldResultsForSeries(isAscending bool, leftover *protocol.Series, responses []*protocol.Response, yield func(*protocol.Series) error) *protocol.Series {
	// results can come from different servers. Some of which won't know about fields that other servers may know about.
	// We need to normalize all this so that all fields are represented and the other field values are null.
	// Give each unique field name an index. We'll use this map later to construct the results and make sure that
	// the response objects have their fields in the result.
	fieldIndexes := make(map[string]int)
	for _, response := range responses {
		for _, name := range response.Series.Fields {
			if _, hasField := fieldIndexes[name]; !hasField {
				fieldIndexes[name] = len(fieldIndexes)
			}
		}
	}
	fields := make([]string, len(fieldIndexes), len(fieldIndexes))
	for name, index := range fieldIndexes {
		fields[index] = name
	}
	fieldCount := len(fields)

	result := &protocol.Series{Name: responses[0].Series.Name, Fields: fields, Points: make([]*protocol.Point, 0)}
	if leftover == nil {
		leftover = &protocol.Series{Name: responses[0].Series.Name, Fields: fields, Points: make([]*protocol.Point, 0)}
	}

	barrierTime := BARRIER_TIME_MIN
	if isAscending {
		barrierTime = BARRIER_TIME_MAX
	}
	var shouldYieldComparator func(rawTime *int64) bool
	if isAscending {
		shouldYieldComparator = func(rawTime *int64) bool {
			return rawTime != nil && *rawTime < barrierTime
		}
	} else {
		shouldYieldComparator = func(rawTime *int64) bool {
			return rawTime != nil && *rawTime > barrierTime
		}
	}

	// find the barrier time
	for _, response := range responses {
		if shouldYieldComparator(response.NextPointTime) {
			barrierTime = *response.NextPointTime
		}
	}

	// yield the points from leftover that are safe
	for _, point := range leftover.Points {
		if shouldYieldComparator(point.Timestamp) {
			result.Points = append(result.Points, point)
		} else {
			break
		}
	}
	// if they all got added, clear out the leftover
	if len(leftover.Points) == len(result.Points) {
		leftover.Points = make([]*protocol.Point, 0)
	}

	if barrierTime == BARRIER_TIME_MIN || barrierTime == BARRIER_TIME_MAX {
		// all the nextPointTimes were nil so we're safe to send everything
		for _, response := range responses {
			// if this is the case we know that all responses contained the same
			// fields. So just append the points
			if len(response.Series.Fields) == fieldCount {
				result.Points = append(result.Points, response.Series.Points...)
			} else {
				log.Debug("Responses from servers had different numbers of fields.")
				for _, p := range response.Series.Points {
					self.normalizePointAndAppend(fieldIndexes, result, response.Series.Fields, p)
				}
			}
		}
		if len(leftover.Fields) == fieldCount {
			result.Points = append(result.Points, leftover.Points...)
			leftover.Points = []*protocol.Point{}
		} else {
			log.Debug("Responses from servers had different numbers of fields.")
			for _, p := range leftover.Points {
				self.normalizePointAndAppend(fieldIndexes, result, leftover.Fields, p)
			}
		}
	} else {
		for _, response := range responses {
			if shouldYieldComparator(response.NextPointTime) {
				// all points safe to yield
				if fieldCount == len(response.Series.Fields) {
					result.Points = append(result.Points, response.Series.Points...)
				} else {
					log.Debug("Responses from servers had different numbers of fields.")
					for _, p := range response.Series.Points {
						self.normalizePointAndAppend(fieldIndexes, result, response.Series.Fields, p)
					}
				}
				continue
			}
			if fieldCount == len(response.Series.Fields) {
				for i, point := range response.Series.Points {
					if shouldYieldComparator(point.Timestamp) {
						result.Points = append(result.Points, point)
					} else {
						// since they're returned in order, we can just append these to
						// the leftover and break out.
						leftover.Points = append(leftover.Points, response.Series.Points[i:]...)
						break
					}
				}
			} else {
				for i, point := range response.Series.Points {
					if shouldYieldComparator(point.Timestamp) {
						self.normalizePointAndAppend(fieldIndexes, result, response.Series.Fields, point)
					} else {
						// since they're returned in order, we can just append these to
						// the leftover and break out.
						for _, point := range response.Series.Points[i:] {
							self.normalizePointAndAppend(fieldIndexes, leftover, response.Series.Fields, point)
						}
						break
					}
				}
			}
		}
	}

	if isAscending {
		result.SortPointsTimeAscending()
		leftover.SortPointsTimeAscending()
	} else {
		result.SortPointsTimeDescending()
		leftover.SortPointsTimeDescending()
	}

	// Don't yield an empty points array, the engine will think it's the end of the stream.
	// streamResultsFromChannels will send the empty ones after all channels have returned.
	if len(result.Points) > 0 {
		yield(result)
	}
	if len(leftover.Points) > 0 {
		return leftover
	}
	return nil
}
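// Simplified, hypothetical sketch of the barrier-time rule used above, for the
// ascending case only: the barrier is the smallest advertised next-point time
// across server responses, and only points strictly before it are safe to yield
// now; the rest must wait for more data. It operates on plain int64 timestamps
// rather than protocol types, and assumes the math package is available.
func exampleSafeToYieldAscending(timestamps []int64, nextPointTimes []*int64) (safe, waiting []int64) {
	barrier := int64(math.MaxInt64)
	for _, t := range nextPointTimes {
		if t != nil && *t < barrier {
			barrier = *t
		}
	}
	// if no server advertised a next point, the barrier stays at MaxInt64 and
	// every point is safe to yield
	for _, ts := range timestamps {
		if ts < barrier {
			safe = append(safe, ts)
		} else {
			waiting = append(waiting, ts)
		}
	}
	return safe, waiting
}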