func (self *Coordinator) getShardsAndProcessor(querySpec *parser.QuerySpec, writer engine.Processor) ([]*cluster.ShardData, engine.Processor, error) {
	shards, err := self.clusterConfiguration.GetShardsForQuery(querySpec)
	if err != nil {
		return nil, nil, err
	}
	shouldAggregateLocally := shards.ShouldAggregateLocally(querySpec)

	q := querySpec.SelectQuery()
	if q == nil {
		return shards, writer, nil
	}

	if !shouldAggregateLocally {
		// if we should aggregate in the coordinator (i.e. aggregation
		// isn't happening locally at the shard level), create an engine
		shardIds := make([]uint32, len(shards))
		for i, s := range shards {
			shardIds[i] = s.Id()
		}
		writer, err = engine.NewQueryEngine(writer, q, shardIds)
		if err != nil {
			log.Error(err)
			log.Debug("Coordinator processor chain: %s", engine.ProcessorChain(writer))
		}
		return shards, writer, err
	}

	// if we have a query with limit, then create an engine, or we can
	// make the passthrough limit aware
	writer = engine.NewPassthroughEngineWithLimit(writer, 100, q.Limit)
	return shards, writer, nil
}
func (self *ShardData) getProcessor(querySpec *parser.QuerySpec, processor engine.Processor) (engine.Processor, error) {
	switch qt := querySpec.Query().Type(); qt {
	case parser.Delete, parser.DropSeries:
		return NilProcessor{}, nil
	case parser.Select:
		// continue
	default:
		panic(fmt.Errorf("Unexpected query type: %s", qt))
	}

	if querySpec.IsSinglePointQuery() {
		return engine.NewPassthroughEngine(processor, 1), nil
	}

	query := querySpec.SelectQuery()
	var err error

	// We should aggregate at the shard level
	if self.ShouldAggregateLocally(querySpec) {
		log.Debug("creating a query engine")
		processor, err = engine.NewQueryEngine(processor, query, nil)
		if err != nil {
			return nil, err
		}
		goto addFilter
	}

	// we shouldn't limit queries that have aggregates and aren't
	// aggregated locally, otherwise the aggregation, which happens
	// in the coordinator, will get partial data and be incorrect
	if query.HasAggregates() {
		log.Debug("creating a passthrough engine")
		processor = engine.NewPassthroughEngine(processor, 1000)
		goto addFilter
	}

	// This is an optimization so we don't send more data than we should
	// over the wire. The coordinator has its own passthrough engine which
	// does the final limit.
	if l := query.Limit; l > 0 {
		log.Debug("creating a passthrough engine with limit")
		processor = engine.NewPassthroughEngineWithLimit(processor, 1000, query.Limit)
	}

addFilter:
	if query := querySpec.SelectQuery(); query != nil && query.GetFromClause().Type != parser.FromClauseInnerJoin {
		// Joins do their own filtering since we need to get all
		// points before filtering. This is due to the fact that some
		// where expressions will be difficult to compute before the
		// points are joined together, think where clause with
		// left.column = 'something' or right.column = 'something_else'.
		// We can't filter the individual series separately. The
		// filtering happens in merge.go:55
		processor = engine.NewFilteringEngine(query, processor)
	}
	return processor, nil
}
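// For reference, a minimal sketch of what the NilProcessor returned above for deletes and
// drops might look like. This is an assumption inferred from how it is used in getProcessor,
// not the actual definition from this codebase: the sketch assumes the engine.Processor
// interface exposes Yield, Name, and Close, and it simply discards anything handed to it,
// since delete/drop queries have no series to forward.
type nilProcessorSketch struct{}

func (nilProcessorSketch) Name() string { return "NilProcessorSketch" }

// Yield drops the incoming series and tells the caller to keep going.
func (nilProcessorSketch) Yield(s *protocol.Series) (bool, error) { return true, nil }

func (nilProcessorSketch) Close() error { return nil }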
func (self *CoordinatorImpl) getShardsAndProcessor(querySpec *parser.QuerySpec, writer SeriesWriter) ([]*cluster.ShardData, cluster.QueryProcessor, chan bool, error) {
	shards := self.clusterConfiguration.GetShardsForQuery(querySpec)
	shouldAggregateLocally := self.shouldAggregateLocally(shards, querySpec)

	var err error
	var processor cluster.QueryProcessor

	responseChan := make(chan *protocol.Response)
	seriesClosed := make(chan bool)

	selectQuery := querySpec.SelectQuery()
	if selectQuery != nil {
		if !shouldAggregateLocally {
			// if we should aggregate in the coordinator (i.e. aggregation
			// isn't happening locally at the shard level), create an engine
			processor, err = engine.NewQueryEngine(querySpec.SelectQuery(), responseChan)
		} else {
			// if we have a query with limit, then create an engine, or we can
			// make the passthrough limit aware
			processor = engine.NewPassthroughEngineWithLimit(responseChan, 100, selectQuery.Limit)
		}
	} else if !shouldAggregateLocally {
		processor = engine.NewPassthroughEngine(responseChan, 100)
	}

	if err != nil {
		return nil, nil, nil, err
	}

	if processor == nil {
		return shards, nil, nil, nil
	}

	// drain the processor's responses: forward non-empty series to the writer
	// (skipping regular query responses for explain queries), and close the
	// writer and signal seriesClosed once the stream ends or access is denied
	go func() {
		for {
			response := <-responseChan

			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				writer.Close()
				seriesClosed <- true
				return
			}

			if !(*response.Type == queryResponse && querySpec.IsExplainQuery()) {
				if response.Series != nil && len(response.Series.Points) > 0 {
					writer.Write(response.Series)
				}
			}
		}
	}()

	return shards, processor, seriesClosed, nil
}
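// For reference, the SeriesWriter parameter above is only used through Write and Close.
// The actual interface definition is not part of this excerpt, so the exact signatures
// below are assumptions inferred from that usage.
type seriesWriterSketch interface {
	Write(series *protocol.Series) error
	Close()
}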
func (self *Coordinator) getShardsAndProcessor(querySpec *parser.QuerySpec, writer engine.Processor) ([]*cluster.ShardData, engine.Processor, error) {
	shards := self.clusterConfiguration.GetShardsForQuery(querySpec)
	shouldAggregateLocally := shards.ShouldAggregateLocally(querySpec)

	var err error
	q := querySpec.SelectQuery()
	if q == nil {
		return shards, writer, nil
	}

	if !shouldAggregateLocally {
		// if we should aggregate in the coordinator (i.e. aggregation
		// isn't happening locally at the shard level), create an engine
		writer, err = engine.NewQueryEngine(writer, q)
		return shards, writer, err
	}

	// if we have a query with limit, then create an engine, or we can
	// make the passthrough limit aware
	writer = engine.NewPassthroughEngineWithLimit(writer, 100, q.Limit)
	return shards, writer, nil
}
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryString())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryString(), func(err interface{}) {
		response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(fmt.Sprintf("%s", err))}
	})

	// This is only for queries that are deletes or drops. They need to be sent everywhere
	// as opposed to just the local or one of the remote shards. But this boolean should
	// only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	// local shard: build the appropriate processor chain and run the query against the local store
	if self.IsLocal {
		var processor QueryProcessor
		var err error

		if querySpec.IsListSeriesQuery() {
			processor = engine.NewListSeriesEngine(response)
		} else if querySpec.IsDeleteFromSeriesQuery() || querySpec.IsDropSeriesQuery() || querySpec.IsSinglePointQuery() {
			maxDeleteResults := 10000
			processor = engine.NewPassthroughEngine(response, maxDeleteResults)
		} else {
			query := querySpec.SelectQuery()
			if self.ShouldAggregateLocally(querySpec) {
				log.Debug("creating a query engine")
				processor, err = engine.NewQueryEngine(query, response)
				if err != nil {
					response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
					log.Error("Error while creating engine: %s", err)
					return
				}
				processor.SetShardInfo(int(self.Id()), self.IsLocal)
			} else if query.HasAggregates() {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine")
				processor = engine.NewPassthroughEngine(response, maxPointsToBufferBeforeSending)
			} else {
				maxPointsToBufferBeforeSending := 1000
				log.Debug("creating a passthrough engine with limit")
				processor = engine.NewPassthroughEngineWithLimit(response, maxPointsToBufferBeforeSending, query.Limit)
			}

			if query.GetFromClause().Type != parser.FromClauseInnerJoin {
				// Joins do their own filtering since we need to get all
				// points before filtering. This is due to the fact that some
				// where expressions will be difficult to compute before the
				// points are joined together, think where clause with
				// left.column = 'something' or right.column = 'something_else'.
				// We can't filter the individual series separately. The
				// filtering happens in merge.go:55
				processor = engine.NewFilteringEngine(query, processor)
			}
		}

		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)

		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{Type: &endStreamResponse, ErrorMessage: p.String(err.Error())}
			return
		}
		processor.Close()
		response <- &p.Response{Type: &endStreamResponse}
		return
	}

	// remote shard: forward the query to a random healthy server that owns it
	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{Type: &endStreamResponse, ErrorMessage: &message}
	log.Error(message)
}
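// For reference, the QueryProcessor above is only touched here through SetShardInfo and Close;
// the engines also receive yielded points/series through it, but those methods are not visible
// in this excerpt. A partial sketch inferred from this usage (signatures are assumptions):
type queryProcessorSketch interface {
	SetShardInfo(shardId int, shardLocal bool)
	Close()
}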