// runQuerySpec executes querySpec against the matching shards, bounding the
// number of concurrently queried shards, and streams results to seriesWriter.
// Shard querying and response reading run in separate goroutines coordinated
// through the errors and responseChannels channels.
//
// NOTE(review): the typo in readFromResposneChannels is the name of a sibling
// method defined elsewhere in this file; renaming it must be done there too.
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
	shards, processor, seriesClosed, err := self.getShardsAndProcessor(querySpec, seriesWriter)
	if err != nil {
		return err
	}
	// On exit: if a coordinator-side processor exists, close it and wait for
	// the forwarding goroutine (started by getShardsAndProcessor) to finish
	// closing the writer; otherwise close the writer directly.
	defer func() {
		if processor != nil {
			processor.Close()
			<-seriesClosed
		} else {
			seriesWriter.Close()
		}
	}()
	shardConcurrentLimit := self.config.ConcurrentShardQueryLimit
	if self.shouldQuerySequentially(shards, querySpec) {
		log.Debug("Querying shards sequentially")
		shardConcurrentLimit = 1
	}
	log.Debug("Shard concurrent limit: ", shardConcurrentLimit)
	// Pre-fill errors with one nil per concurrency slot — presumably it acts
	// as a ticket pool limiting in-flight shard queries; each queried shard
	// takes a slot and returns its (possibly nil) error. TODO confirm against
	// queryShards / readFromResposneChannels.
	errors := make(chan error, shardConcurrentLimit)
	for i := 0; i < shardConcurrentLimit; i++ {
		errors <- nil
	}
	// Each shard's response stream is delivered as its own channel so the
	// reader can consume shards in query order.
	responseChannels := make(chan (<-chan *protocol.Response), shardConcurrentLimit)
	go self.readFromResposneChannels(processor, seriesWriter, querySpec.IsExplainQuery(), errors, responseChannels)
	err = self.queryShards(querySpec, shards, errors, responseChannels)
	// make sure we read the rest of the errors and responses
	// NOTE(review): these range loops only terminate if errors and
	// responseChannels are closed by the goroutines above — verify.
	for _err := range errors {
		// keep the first error encountered
		if err == nil {
			err = _err
		}
	}
	for responsechan := range responseChannels {
		for response := range responsechan {
			if response.GetType() != endStreamResponse {
				continue
			}
			// an end-of-stream response may carry a shard-side error
			if response.ErrorMessage != nil && err == nil {
				err = common.NewQueryError(common.InvalidArgument, *response.ErrorMessage)
			}
			break
		}
	}
	return err
}
// getShardsAndProcessor resolves the shards covered by querySpec and, when the
// shards cannot fully process the query themselves, builds a coordinator-side
// query processor whose output is forwarded to writer by a helper goroutine.
//
// Returns the shards, the processor (nil when shards handle everything and
// responses should go straight to the writer), a channel that receives true
// once the forwarding goroutine has closed the writer (nil when there is no
// processor), and any engine-construction error.
func (self *CoordinatorImpl) getShardsAndProcessor(querySpec *parser.QuerySpec, writer SeriesWriter) ([]*cluster.ShardData, cluster.QueryProcessor, chan bool, error) {
	shards := self.clusterConfiguration.GetShards(querySpec)
	shouldAggregateLocally := self.shouldAggregateLocally(shards, querySpec)
	var err error
	var processor cluster.QueryProcessor
	responseChan := make(chan *protocol.Response)
	seriesClosed := make(chan bool)
	selectQuery := querySpec.SelectQuery()
	if selectQuery != nil {
		if !shouldAggregateLocally {
			// aggregation isn't happening locally at the shard level, so run
			// a full query engine in the coordinator
			processor, err = engine.NewQueryEngine(querySpec.SelectQuery(), responseChan)
		} else {
			// shards aggregate locally; the coordinator only needs a
			// limit-aware passthrough to enforce the query's limit
			processor = engine.NewPassthroughEngineWithLimit(responseChan, 100, selectQuery.Limit)
		}
	} else if !shouldAggregateLocally {
		processor = engine.NewPassthroughEngine(responseChan, 100)
	}
	if err != nil {
		return nil, nil, nil, err
	}
	if processor == nil {
		// no coordinator-side processing needed; caller streams shard
		// responses directly to the writer
		return shards, nil, nil, nil
	}
	// Forward the processor's output to the writer until the stream ends,
	// then close the writer and signal completion on seriesClosed.
	go func() {
		for {
			response := <-responseChan
			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				writer.Close()
				seriesClosed <- true
				return
			}
			// for EXPLAIN queries, raw query responses are suppressed —
			// only explain output reaches the client
			if !(*response.Type == queryResponse && querySpec.IsExplainQuery()) {
				if response.Series != nil && len(response.Series.Points) > 0 {
					writer.Write(response.Series)
				}
			}
		}
	}()
	return shards, processor, seriesClosed, nil
}
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error { shards, processor, seriesClosed, err := self.getShardsAndProcessor(querySpec, seriesWriter) if err != nil { return err } shardConcurrentLimit := self.config.ConcurrentShardQueryLimit if self.shouldQuerySequentially(shards, querySpec) { log.Debug("Querying shards sequentially") shardConcurrentLimit = 1 } log.Debug("Shard concurrent limit: ", shardConcurrentLimit) for i := 0; i < len(shards); i += shardConcurrentLimit { responses := make([]<-chan *protocol.Response, 0, shardConcurrentLimit) for j := 0; j < shardConcurrentLimit && i+j < len(shards); j++ { shard := shards[i+j] responseChan := make(chan *protocol.Response, shard.QueryResponseBufferSize(querySpec, self.config.LevelDbPointBatchSize)) // We query shards for data and stream them to query processor log.Debug("QUERYING: shard: ", i+j, shard.String()) go shard.Query(querySpec, responseChan) responses = append(responses, responseChan) } err := self.readFromResposneChannels(processor, seriesWriter, querySpec.IsExplainQuery(), responses) if err != nil { log.Error("Reading responses from channels returned an error: %s", err) return err } } if processor != nil { processor.Close() <-seriesClosed return err } seriesWriter.Close() return err }
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error { shards, processor, seriesClosed, err := self.getShardsAndProcessor(querySpec, seriesWriter) if err != nil { return err } responses := make([]chan *protocol.Response, 0) for _, shard := range shards { responseChan := make(chan *protocol.Response, self.config.QueryShardBufferSize) // We query shards for data and stream them to query processor go shard.Query(querySpec, responseChan) responses = append(responses, responseChan) } for i, responseChan := range responses { log.Debug("READING: shard: ", shards[i].String()) for { response := <-responseChan //log.Debug("GOT RESPONSE: ", response.Type, response.Series) log.Debug("GOT RESPONSE: ", response.Type) if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse { if response.ErrorMessage != nil && err == nil { err = common.NewQueryError(common.InvalidArgument, *response.ErrorMessage) } break } if response.Series == nil || len(response.Series.Points) == 0 { log.Debug("Series has no points, continue") continue } // if we don't have a processor, yield the point to the writer // this happens if shard took care of the query // otherwise client will get points from passthrough engine if processor != nil { // if the data wasn't aggregated at the shard level, aggregate // the data here log.Debug("YIELDING: %d points", len(response.Series.Points)) processor.YieldSeries(response.Series) continue } // If we have EXPLAIN query, we don't write actual points (of // response.Type Query) to the client if !(*response.Type == queryResponse && querySpec.IsExplainQuery()) { seriesWriter.Write(response.Series) } } log.Debug("DONE: shard: ", shards[i].String()) } if processor != nil { processor.Close() <-seriesClosed return err } seriesWriter.Close() return err }