// InterpolateValuesAndCommit writes the given series to the target of a
// continuous query, replacing any [field] placeholders in the target name
// with the corresponding field value of each point.
func (self *CoordinatorImpl) InterpolateValuesAndCommit(query string, db string, series *protocol.Series, targetName string, assignSequenceNumbers bool) error {
	defer common.RecoverFunc(db, query, nil)

	targetName = strings.Replace(targetName, ":series_name", *series.Name, -1)
	type sequenceKey struct {
		seriesName string
		timestamp  int64
	}
	sequenceMap := make(map[sequenceKey]int)
	r, _ := regexp.Compile(`\[.*?\]`)

	if r.MatchString(targetName) {
		// The target name contains [field] placeholders, so each point may
		// map to a different target series; fan the points out by
		// interpolated name.
		serieses := map[string]*protocol.Series{}
		for _, point := range series.Points {
			targetNameWithValues := r.ReplaceAllStringFunc(targetName, func(match string) string {
				fieldName := match[1 : len(match)-1]
				fieldIndex := series.GetFieldIndex(fieldName)
				return point.GetFieldValueAsString(fieldIndex)
			})

			if assignSequenceNumbers {
				key := sequenceKey{targetNameWithValues, *point.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				point.SequenceNumber = &sequenceNumber
			}

			newSeries := serieses[targetNameWithValues]
			if newSeries == nil {
				newSeries = &protocol.Series{Name: &targetNameWithValues, Fields: series.Fields, Points: []*protocol.Point{point}}
				serieses[targetNameWithValues] = newSeries
				continue
			}
			newSeries.Points = append(newSeries.Points, point)
		}

		seriesSlice := make([]*protocol.Series, 0, len(serieses))
		for _, s := range serieses {
			seriesSlice = append(seriesSlice, s)
		}
		if e := self.CommitSeriesData(db, seriesSlice, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	} else {
		newSeries := &protocol.Series{Name: &targetName, Fields: series.Fields, Points: series.Points}

		if assignSequenceNumbers {
			for _, point := range newSeries.Points {
				sequenceMap[sequenceKey{targetName, *point.Timestamp}] += 1
				sequenceNumber := uint64(sequenceMap[sequenceKey{targetName, *point.Timestamp}])
				point.SequenceNumber = &sequenceNumber
			}
		}

		if e := self.CommitSeriesData(db, []*protocol.Series{newSeries}, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	}

	return nil
}
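// A minimal, self-contained sketch of the [field] interpolation performed
// above, using toy data in place of the protocol.Series types. The series
// name, field names, and values here are hypothetical, not part of the
// original code.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	targetName := "cpu.[host]" // continuous query target with one placeholder
	fields := []string{"host", "value"}
	point := []string{"server1", "0.64"}

	r := regexp.MustCompile(`\[.*?\]`)
	interpolated := r.ReplaceAllStringFunc(targetName, func(match string) string {
		fieldName := match[1 : len(match)-1] // strip the surrounding brackets
		for i, f := range fields {
			if f == fieldName {
				return point[i]
			}
		}
		return match // unknown field: leave the placeholder as-is
	})
	fmt.Println(interpolated) // prints "cpu.server1"
}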
// RunQuery parses the query string and dispatches each statement to the
// appropriate handler, streaming results through the given SeriesWriter.
func (self *CoordinatorImpl) RunQuery(user common.User, database string, queryString string, seriesWriter SeriesWriter) (err error) {
	log.Info("Query: db: %s, u: %s, q: %s", database, user.GetName(), queryString)
	// don't let a panic pass beyond RunQuery
	defer common.RecoverFunc(database, queryString, nil)

	q, err := parser.ParseQuery(queryString)
	if err != nil {
		return err
	}

	for _, query := range q {
		querySpec := parser.NewQuerySpec(user, database, query)

		if query.DeleteQuery != nil {
			if err := self.clusterConfiguration.CreateCheckpoint(); err != nil {
				return err
			}
			if err := self.runDeleteQuery(querySpec, seriesWriter); err != nil {
				return err
			}
			continue
		}

		if query.DropQuery != nil {
			if err := self.DeleteContinuousQuery(user, database, uint32(query.DropQuery.Id)); err != nil {
				return err
			}
			continue
		}

		if query.IsListQuery() {
			if query.IsListSeriesQuery() {
				self.runListSeriesQuery(querySpec, seriesWriter)
			} else if query.IsListContinuousQueriesQuery() {
				queries, err := self.ListContinuousQueries(user, database)
				if err != nil {
					return err
				}
				for _, q := range queries {
					if err := seriesWriter.Write(q); err != nil {
						return err
					}
				}
			}
			continue
		}

		if query.DropSeriesQuery != nil {
			err := self.runDropSeriesQuery(querySpec, seriesWriter)
			if err != nil {
				return err
			}
			continue
		}

		selectQuery := query.SelectQuery

		if selectQuery.IsContinuousQuery() {
			return self.CreateContinuousQuery(user, database, queryString)
		}
		if err := self.checkPermission(user, querySpec); err != nil {
			return err
		}
		return self.runQuery(querySpec, seriesWriter)
	}
	seriesWriter.Close()
	return nil
}
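// RunQuery streams its results through a SeriesWriter. The interface shape
// below (Write and Close) is inferred from the calls above and is an
// assumption, not the project's actual definition; this is a sketch of a
// trivial in-memory writer of the kind that is handy in tests.
package main

import "fmt"

// Series stands in for protocol.Series in this sketch.
type Series struct{ Name string }

type SeriesWriter interface {
	Write(*Series) error
	Close()
}

// memoryWriter buffers every series it is handed.
type memoryWriter struct{ series []*Series }

func (w *memoryWriter) Write(s *Series) error {
	w.series = append(w.series, s)
	return nil
}

func (w *memoryWriter) Close() {}

func main() {
	w := &memoryWriter{}
	var sw SeriesWriter = w
	_ = sw.Write(&Series{Name: "cpu"})
	sw.Close()
	fmt.Printf("buffered %d series\n", len(w.series)) // buffered 1 series
}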
// Query runs the given query spec against this shard, either locally or by
// forwarding the request to a healthy server that holds a copy of the shard.
// Results are streamed back on the response channel and always terminated
// with an endStreamResponse.
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryString())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryString(), func(err interface{}) {
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(fmt.Sprintf("%s", err))}
	})

	// This is only for queries that are deletes or drops. They need to be
	// sent everywhere as opposed to just the local or one of the remote
	// shards. But this boolean should only be set to true on the server
	// that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor QueryProcessor
		var err error

		// Pick the processor that matches the query type.
		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}

			if query.GetFromClause().Type != parser.FromClauseInnerJoin {
				// Joins do their own filtering since we need to get all
				// points before filtering. This is due to the fact that some
				// where expressions will be difficult to compute before the
				// points are joined together, think where clause with
				// left.column = 'something' or right.column =
				// 'something_else'. We can't filter the individual series
				// separately. The filtering happens in merge.go:55
				processor = engine.NewFilteringEngine(query, processor)
			}
		}

		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)

		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			return
		}
		processor.Close()
		response <- &p.Response{Type: &endStreamResponse}
		return
	}

	// Not a local shard: forward the request to a healthy remote server.
	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
	log.Error(message)
}
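// Results and errors above travel over a response channel that is always
// terminated by an endStreamResponse. A minimal, self-contained sketch of
// that consume-until-end-of-stream pattern, with stand-in types (the real
// p.Response is a protobuf message with more fields):
package main

import "fmt"

type ResponseType int

const (
	queryResponse ResponseType = iota
	endStreamResponse
)

type Response struct {
	Type         ResponseType
	ErrorMessage *string
	Payload      string
}

func query(responses chan<- *Response) {
	// Stream any number of result responses...
	responses <- &Response{Type: queryResponse, Payload: "series data"}
	// ...and always finish with the end-of-stream marker, with or without
	// an error, so the reader knows this shard is done.
	responses <- &Response{Type: endStreamResponse}
}

func main() {
	responses := make(chan *Response)
	go query(responses)
	for r := range responses {
		if r.Type == endStreamResponse {
			if r.ErrorMessage != nil {
				fmt.Println("query failed:", *r.ErrorMessage)
			}
			break
		}
		fmt.Println("got:", r.Payload)
	}
}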
// InterpolateValuesAndCommit writes the given series to the target of a
// continuous query. Any [field] placeholders in the target name are replaced
// with each point's value for that field, and those fields are then stripped
// from the points before the data is committed.
func (self *CoordinatorImpl) InterpolateValuesAndCommit(query string, db string, series *protocol.Series, targetName string, assignSequenceNumbers bool) error {
	defer common.RecoverFunc(db, query, nil)

	targetName = strings.Replace(targetName, ":series_name", *series.Name, -1)
	type sequenceKey struct {
		seriesName string
		timestamp  int64
	}
	sequenceMap := make(map[sequenceKey]int)
	r, _ := regexp.Compile(`\[.*?\]`)

	// get the fields that are used in the target name
	fieldsInTargetName := r.FindAllString(targetName, -1)
	fieldIndices := make([]int, 0, len(fieldsInTargetName))
	for i, f := range fieldsInTargetName {
		f = f[1 : len(f)-1]
		fieldIndices = append(fieldIndices, series.GetFieldIndex(f))
		fieldsInTargetName[i] = f
	}

	fields := make([]string, 0, len(series.Fields)-len(fieldIndices))

	// remove the fields used in the target name from the series fields
nextfield:
	for i, f := range series.Fields {
		for _, fi := range fieldIndices {
			if fi == i {
				continue nextfield
			}
		}
		fields = append(fields, f)
	}

	if r.MatchString(targetName) {
		serieses := map[string]*protocol.Series{}
		for _, point := range series.Points {
			fieldIndex := 0
			targetNameWithValues := r.ReplaceAllStringFunc(targetName, func(_ string) string {
				value := point.GetFieldValueAsString(fieldIndices[fieldIndex])
				fieldIndex++
				return value
			})

			p := &protocol.Point{
				Values:         make([]*protocol.FieldValue, 0, len(point.Values)-len(fieldIndices)),
				Timestamp:      point.Timestamp,
				SequenceNumber: point.SequenceNumber,
			}

			// remove the values of the fields used in the target name from the point
		nextvalue:
			for i, v := range point.Values {
				for _, fi := range fieldIndices {
					if fi == i {
						continue nextvalue
					}
				}
				p.Values = append(p.Values, v)
			}

			if assignSequenceNumbers {
				key := sequenceKey{targetNameWithValues, *p.Timestamp}
				sequenceMap[key] += 1
				sequenceNumber := uint64(sequenceMap[key])
				p.SequenceNumber = &sequenceNumber
			}

			newSeries := serieses[targetNameWithValues]
			if newSeries == nil {
				newSeries = &protocol.Series{Name: &targetNameWithValues, Fields: fields, Points: []*protocol.Point{p}}
				serieses[targetNameWithValues] = newSeries
				continue
			}
			newSeries.Points = append(newSeries.Points, p)
		}

		seriesSlice := make([]*protocol.Series, 0, len(serieses))
		for _, s := range serieses {
			seriesSlice = append(seriesSlice, s)
		}
		if e := self.CommitSeriesData(db, seriesSlice, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	} else {
		newSeries := &protocol.Series{Name: &targetName, Fields: fields, Points: series.Points}

		if assignSequenceNumbers {
			for _, point := range newSeries.Points {
				sequenceMap[sequenceKey{targetName, *point.Timestamp}] += 1
				sequenceNumber := uint64(sequenceMap[sequenceKey{targetName, *point.Timestamp}])
				point.SequenceNumber = &sequenceNumber
			}
		}

		if e := self.CommitSeriesData(db, []*protocol.Series{newSeries}, true); e != nil {
			log.Error("Couldn't write data for continuous query: ", e)
		}
	}

	return nil
}
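// Sequence numbers above disambiguate points that land in the same target
// series with the same timestamp. A minimal, self-contained sketch of the
// same per-(name, timestamp) counter map, using plain values in place of
// protocol.Point; the series names and timestamps are made up.
package main

import "fmt"

type sequenceKey struct {
	seriesName string
	timestamp  int64
}

func main() {
	sequenceMap := make(map[sequenceKey]int)
	points := []struct {
		series    string
		timestamp int64
	}{
		{"cpu.server1", 1000},
		{"cpu.server1", 1000}, // same series and timestamp: gets sequence 2
		{"cpu.server2", 1000},
	}
	for _, p := range points {
		key := sequenceKey{p.series, p.timestamp}
		sequenceMap[key]++ // next sequence number for this (series, timestamp)
		fmt.Printf("%s@%d -> sequence %d\n", p.series, p.timestamp, sequenceMap[key])
	}
}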