func (self *Coordinator) runListContinuousQueries(user common.User, db string, p engine.Processor) error {
	queries, err := self.ListContinuousQueries(user, db)
	if err != nil {
		return err
	}
	for _, q := range queries {
		if ok, err := p.Yield(q); !ok || err != nil {
			return err
		}
	}
	return nil
}
// runListSeriesQuery yields a single "list_series_result" series containing the
// names of all series in the database (optionally filtered by the query's regex)
// and, when IncludeSpaces is set, the shard space each series belongs to.
func (self *Coordinator) runListSeriesQuery(querySpec *parser.QuerySpec, p engine.Processor) error {
	allSeries := self.clusterConfiguration.MetaStore.GetSeriesForDatabase(querySpec.Database())
	matchingSeries := allSeries
	q := querySpec.Query().GetListSeriesQuery()
	if q.HasRegex() {
		matchingSeries = nil
		regex := q.GetRegex()
		for _, s := range allSeries {
			if !regex.MatchString(s) {
				continue
			}
			matchingSeries = append(matchingSeries, s)
		}
	}

	name := "list_series_result"
	var fields []string
	points := make([]*protocol.Point, len(matchingSeries))

	if q.IncludeSpaces {
		fields = []string{"name", "space"}
		spaces := self.clusterConfiguration.GetShardSpacesForDatabase(querySpec.Database())
		for i, s := range matchingSeries {
			spaceName := ""
			for _, sp := range spaces {
				if sp.MatchesSeries(s) {
					spaceName = sp.Name
					break
				}
			}
			fieldValues := []*protocol.FieldValue{
				{StringValue: proto.String(s)},
				{StringValue: proto.String(spaceName)},
			}
			points[i] = &protocol.Point{Values: fieldValues}
		}
	} else {
		fields = []string{"name"}
		for i, s := range matchingSeries {
			fieldValues := []*protocol.FieldValue{
				{StringValue: proto.String(s)},
			}
			points[i] = &protocol.Point{Values: fieldValues}
		}
	}

	seriesResult := &protocol.Series{Name: &name, Fields: fields, Points: points}
	_, err := p.Yield(seriesResult)
	return err
}
// yieldToProcessor yields the series to the processor once per alias, stopping
// early if the processor signals it is done or returns an error.
func yieldToProcessor(s *protocol.Series, p engine.Processor, aliases []string) (bool, error) {
	for _, alias := range aliases {
		series := &protocol.Series{
			Name:   proto.String(alias),
			Fields: s.Fields,
			Points: s.Points,
		}
		log4go.Debug("Yielding to %s %s", p.Name(), series)
		if ok, err := p.Yield(series); !ok || err != nil {
			return ok, err
		}
	}
	return true, nil
}
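// For illustration only: a minimal sketch of a processor that could sit on the
// receiving end of the Yield calls above. The engine.Processor interface is not
// shown in this excerpt; this sketch assumes only the methods the surrounding
// code actually uses (Name, Yield, Close) with the signatures implied by the
// call sites, and the real interface may differ or contain more methods. Yield
// returning (false, nil) asks the producer to stop streaming without reporting
// an error, which is why the callers above check both ok and err.
type countingProcessor struct {
	points int
	limit  int
}

func (c *countingProcessor) Name() string { return "countingProcessor" }

func (c *countingProcessor) Yield(s *protocol.Series) (bool, error) {
	c.points += len(s.Points)
	// Stop the producer early (with a nil error) once enough points have been seen.
	return c.points < c.limit, nil
}

func (c *countingProcessor) Close() error { return nil }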
// executeSinglePointQuery looks up the single point identified by the query's
// start time and sequence number, reading each field's value from the store,
// and yields the resulting series to the processor.
func (self *Shard) executeSinglePointQuery(querySpec *parser.QuerySpec, name string, columns []string, p engine.Processor) error {
	fields, err := self.getFieldsForSeries(querySpec.Database(), name, columns)
	if err != nil {
		log.Error("Error looking up fields for %s: %s", name, err)
		return err
	}

	query := querySpec.SelectQuery()
	fieldCount := len(fields)
	fieldNames := make([]string, 0, fieldCount)
	point := &protocol.Point{Values: make([]*protocol.FieldValue, 0, fieldCount)}
	timestamp := common.TimeToMicroseconds(query.GetStartTime())
	sequenceNumber, err := query.GetSinglePointQuerySequenceNumber()
	if err != nil {
		return err
	}

	// set the timestamp and sequence number
	point.SequenceNumber = &sequenceNumber
	point.SetTimestampInMicroseconds(timestamp)

	for _, field := range fields {
		sk := newStorageKey(field.Id, timestamp, sequenceNumber)
		data, err := self.db.Get(sk.bytes())
		if err != nil {
			return err
		}
		if data == nil {
			continue
		}
		fieldValue := &protocol.FieldValue{}
		err = proto.Unmarshal(data, fieldValue)
		if err != nil {
			return err
		}
		fieldNames = append(fieldNames, field.Name)
		point.Values = append(point.Values, fieldValue)
	}

	result := &protocol.Series{Name: &name, Fields: fieldNames, Points: []*protocol.Point{point}}
	if len(result.Points) > 0 {
		_, err := p.Yield(result)
		return err
	}
	return nil
}
// Query executes the query spec against this shard, either locally through a
// processor chain or by forwarding the request to a random healthy server that
// holds a copy of the shard. Results and errors are sent on the response channel.
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan<- *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryStringWithTimeCondition())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryStringWithTimeCondition(), func(err interface{}) {
		response <- &p.Response{
			Type:         p.Response_ERROR.Enum(),
			ErrorMessage: p.String(fmt.Sprintf("%s", err)),
		}
	})

	// This is only for queries that are deletes or drops. They need to be sent
	// everywhere as opposed to just the local or one of the remote shards. But
	// this boolean should only be set to true on the server that receives the
	// initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor engine.Processor = NewResponseChannelProcessor(NewResponseChannelWrapper(response))
		var err error

		processor = NewShardIdInserterProcessor(self.Id(), processor)

		processor, err = self.getProcessor(querySpec, processor)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while creating engine: %s", err)
			return
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)
		log.Debug("Processor chain: %s\n", engine.ProcessorChain(processor))

		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			return
		}
		processor.Close()
		response <- &p.Response{Type: p.Response_END_STREAM.Enum()}
		return
	}

	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{
		Type:         p.Response_ERROR.Enum(),
		ErrorMessage: &message,
	}
	log.Error(message)
}