Example #1
func (self *CoordinatorImpl) runQuerySpec(querySpec *parser.QuerySpec, seriesWriter SeriesWriter) error {
	shards, processor, seriesClosed, err := self.getShardsAndProcessor(querySpec, seriesWriter)
	if err != nil {
		return err
	}

	if len(shards) == 0 {
		return fmt.Errorf("Couldn't look up columns")
	}

	// On return, close the processor and wait for the reader goroutine
	// (started in getShardsAndProcessor) to finish closing the series
	// writer; if there is no processor, close the writer directly.
	defer func() {
		if processor != nil {
			processor.Close()
			<-seriesClosed
		} else {
			seriesWriter.Close()
		}
	}()

	shardConcurrentLimit := self.config.ConcurrentShardQueryLimit
	if self.shouldQuerySequentially(shards, querySpec) {
		log.Debug("Querying shards sequentially")
		shardConcurrentLimit = 1
	}
	log.Debug("Shard concurrent limit: %d", shardConcurrentLimit)

	// Seed the errors channel with one nil per allowed concurrent shard
	// query: the buffered values act as tokens bounding how many shard
	// queries are in flight, and the same channel carries any error back
	// from the response reader.
	errors := make(chan error, shardConcurrentLimit)
	for i := 0; i < shardConcurrentLimit; i++ {
		errors <- nil
	}
	// Each shard query gets its own response channel; sending those
	// channels over responseChannels preserves shard order for the reader.
	responseChannels := make(chan (<-chan *protocol.Response), shardConcurrentLimit)

	go self.readFromResponseChannels(processor, seriesWriter, querySpec.IsExplainQuery(), errors, responseChannels)

	err = self.queryShards(querySpec, shards, errors, responseChannels)

	// make sure we read the rest of the errors and responses
	for _err := range errors {
		if err == nil {
			err = _err
		}
	}

	for responsechan := range responseChannels {
		for response := range responsechan {
			if response.GetType() != endStreamResponse {
				continue
			}
			if response.ErrorMessage != nil && err == nil {
				err = common.NewQueryError(common.InvalidArgument, *response.ErrorMessage)
			}
			break
		}
	}
	return err
}
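
The two range loops at the end above only terminate because the helpers close the channels they own; queryShards and readFromResponseChannels are not part of this excerpt, so that contract is an assumption here. Below is a minimal, self-contained sketch of the same pattern with simplified types (plain strings stand in for *protocol.Response): the dispatcher closes the channel of response channels once every shard has been handed off, and the reader closes the error channel after draining the last response channel. Sizing both buffers to the concurrency limit is what lets the pre-filled nil values double as a semaphore without any extra synchronization primitive.

package main

import "fmt"

func main() {
	const concurrentLimit = 2
	shards := []string{"shard-1", "shard-2", "shard-3", "shard-4"}

	// Tokens: one nil per allowed in-flight shard query.
	errors := make(chan error, concurrentLimit)
	for i := 0; i < concurrentLimit; i++ {
		errors <- nil
	}
	responseChannels := make(chan (<-chan string), concurrentLimit)

	// Reader (stand-in for readFromResponseChannels): drains each shard's
	// responses in dispatch order, returns a token per finished shard, and
	// closes errors once every response channel has been consumed.
	go func() {
		defer close(errors)
		for responses := range responseChannels {
			for r := range responses {
				fmt.Println("got:", r)
			}
			errors <- nil
		}
	}()

	// Dispatcher (stand-in for queryShards): takes a token before starting
	// each shard query and closes responseChannels when all shards are queued.
	for _, shard := range shards {
		if err := <-errors; err != nil {
			break
		}
		responses := make(chan string, 1)
		go func(name string, out chan<- string) {
			defer close(out)
			out <- name + ": end of stream"
		}(shard, responses)
		responseChannels <- responses
	}
	close(responseChannels)

	// Mirror of the final loop in runQuerySpec: drain the remaining tokens,
	// which also guarantees the reader has finished.
	for err := range errors {
		if err != nil {
			fmt.Println("query error:", err)
		}
	}
}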
Example #2
func (self *CoordinatorImpl) getShardsAndProcessor(querySpec *parser.QuerySpec, writer SeriesWriter) ([]*cluster.ShardData, cluster.QueryProcessor, chan bool, error) {
	shards := self.clusterConfiguration.GetShardsForQuery(querySpec)
	shouldAggregateLocally := self.shouldAggregateLocally(shards, querySpec)

	var err error
	var processor cluster.QueryProcessor

	responseChan := make(chan *protocol.Response)
	seriesClosed := make(chan bool)

	selectQuery := querySpec.SelectQuery()
	if selectQuery != nil {
		if !shouldAggregateLocally {
			// if we should aggregate in the coordinator (i.e. aggregation
			// isn't happening locally at the shard level), create an engine
			processor, err = engine.NewQueryEngine(querySpec.SelectQuery(), responseChan)
		} else {
			// aggregation already happened at the shard level, so a
			// passthrough engine is enough; make it limit aware so the
			// query's LIMIT clause is still enforced here
			processor = engine.NewPassthroughEngineWithLimit(responseChan, 100, selectQuery.Limit)
		}
	} else if !shouldAggregateLocally {
		// non-select queries whose results aren't aggregated at the shard
		// level still need their responses forwarded to the client
		processor = engine.NewPassthroughEngine(responseChan, 100)
	}

	if err != nil {
		return nil, nil, nil, err
	}

	if processor == nil {
		return shards, nil, nil, nil
	}

	// Fan-in reader: forward streamed series to the client writer until the
	// processor signals end of stream (or access denied), then close the
	// writer and notify the caller via seriesClosed.
	go func() {
		for {
			response := <-responseChan

			if *response.Type == endStreamResponse || *response.Type == accessDeniedResponse {
				writer.Close()
				seriesClosed <- true
				return
			}
			// during an explain query, suppress the regular query responses
			if !(*response.Type == queryResponse && querySpec.IsExplainQuery()) {
				if response.Series != nil && len(response.Series.Points) > 0 {
					writer.Write(response.Series)
				}
			}
		}
	}()

	return shards, processor, seriesClosed, nil
}
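
Both excerpts touch the writer only through Write and Close. The sketch below illustrates that contract with placeholder types; the real SeriesWriter interface and protocol.Series definitions are not shown in this excerpt, so the exact shapes here (including the error return on Write) are assumptions for illustration.

package main

import "fmt"

// Series is a placeholder for *protocol.Series in the excerpts above.
type Series struct {
	Name   string
	Points []string
}

// SeriesWriter mirrors the two calls the excerpts make on the writer:
// writer.Write(response.Series) to stream a batch of points to the client,
// and writer.Close() to finish the response. The error return on Write is
// an assumption, not taken from this excerpt.
type SeriesWriter interface {
	Write(*Series) error
	Close()
}

// stdoutSeriesWriter is a toy implementation used only for illustration.
type stdoutSeriesWriter struct{}

func (w *stdoutSeriesWriter) Write(s *Series) error {
	fmt.Printf("series %q: %d points\n", s.Name, len(s.Points))
	return nil
}

func (w *stdoutSeriesWriter) Close() {
	fmt.Println("response closed")
}

func main() {
	var w SeriesWriter = &stdoutSeriesWriter{}
	_ = w.Write(&Series{Name: "cpu.load", Points: []string{"p1", "p2"}})
	w.Close()
}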