Code Example #1
File: coordinator.go  Project: carriercomm/facette
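// getShardsAndProcessor returns the shards a query must hit and decides where aggregation runs:
// if the shards cannot aggregate locally, it wraps the given processor in a query engine so the
// coordinator aggregates; otherwise it adds a limit-aware passthrough in front of the writer.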
func (self *Coordinator) getShardsAndProcessor(querySpec *parser.QuerySpec, writer engine.Processor) ([]*cluster.ShardData, engine.Processor, error) {
	shards, err := self.clusterConfiguration.GetShardsForQuery(querySpec)
	if err != nil {
		return nil, nil, err
	}
	shouldAggregateLocally := shards.ShouldAggregateLocally(querySpec)

	q := querySpec.SelectQuery()
	if q == nil {
		return shards, writer, nil
	}

	if !shouldAggregateLocally {
		// if we should aggregate in the coordinator (i.e. aggregation
		// isn't happening locally at the shard level), create an engine
		shardIds := make([]uint32, len(shards))
		for i, s := range shards {
			shardIds[i] = s.Id()
		}
		writer, err = engine.NewQueryEngine(writer, q, shardIds)
		if err != nil {
			log.Error(err)
			log.Debug("Coordinator processor chain: %s", engine.ProcessorChain(writer))
		}
		return shards, writer, err
	}

	// if we have a query with limit, then create an engine, or we can
	// make the passthrough limit aware
	writer = engine.NewPassthroughEngineWithLimit(writer, 100, q.Limit)
	return shards, writer, nil
}
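
The wrapping above is a plain decorator chain: every engine implements the same Processor interface and forwards to the processor it wraps, so the coordinator can decide per query which layers to add. Below is a minimal, self-contained sketch of that idea; Processor, sink, and limitProcessor are hypothetical stand-ins for illustration only, not the project's engine package, whose real Processor operates on protocol series rather than strings.

// Hypothetical sketch of the processor/decorator chain used by the coordinator.
package main

import "fmt"

// Processor is a simplified stand-in for engine.Processor: it receives a
// point and reports whether the producer should keep sending.
type Processor interface {
	Yield(point string) (bool, error)
	Close() error
}

// sink terminates the chain by printing whatever reaches it.
type sink struct{}

func (s *sink) Yield(point string) (bool, error) {
	fmt.Println("got:", point)
	return true, nil
}

func (s *sink) Close() error { return nil }

// limitProcessor mimics the idea behind NewPassthroughEngineWithLimit:
// it passes points through untouched but stops the stream after `limit`.
type limitProcessor struct {
	next  Processor
	limit int
	seen  int
}

func (l *limitProcessor) Yield(point string) (bool, error) {
	if l.seen >= l.limit {
		return false, nil // tell the producer to stop
	}
	l.seen++
	return l.next.Yield(point)
}

func (l *limitProcessor) Close() error { return l.next.Close() }

func main() {
	var w Processor = &sink{}
	// wrap the sink, just as getShardsAndProcessor wraps its writer
	w = &limitProcessor{next: w, limit: 2}
	for _, pt := range []string{"a", "b", "c"} {
		if ok, _ := w.Yield(pt); !ok {
			break
		}
	}
	w.Close()
}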
Code Example #2
File: shard.go  Project: vovkasm/facette
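// Query runs the query described by querySpec against this shard: locally through a processor
// chain when the data lives on this server, otherwise by forwarding the request to a random
// healthy server, streaming results and errors back over the response channel.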
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan<- *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryStringWithTimeCondition())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryStringWithTimeCondition(), func(err interface{}) {
		response <- &p.Response{
			Type:         p.Response_ERROR.Enum(),
			ErrorMessage: p.String(fmt.Sprintf("%s", err)),
		}
	})

	// This is only for queries that are deletes or drops. They need to be sent everywhere as opposed to just the local or one of the remote shards.
	// But this boolean should only be set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor engine.Processor = NewResponseChannelProcessor(NewResponseChannelWrapper(response))
		var err error

		processor = NewShardIdInserterProcessor(self.Id(), processor)

		processor, err = self.getProcessor(querySpec, processor)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while creating engine: %s", err)
			return
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)

		log.Debug("Processor chain:  %s\n", engine.ProcessorChain(processor))

		err = shard.Query(querySpec, processor)
		// if we call Close() in case of an error it will mask the error
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			return
		}
		processor.Close()
		response <- &p.Response{Type: p.Response_END_STREAM.Enum()}
		return
	}

	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{
		Type:         p.Response_ERROR.Enum(),
		ErrorMessage: &message,
	}
	log.Error(message)
}
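
Callers of ShardData.Query drain the response channel until the shard signals completion. The snippet below is a hypothetical consumer meant to sit alongside the shard code (same package and imports); only the Type and ErrorMessage fields and the Response_ERROR / Response_END_STREAM constants come from the code above, and it assumes the usual proto2-generated getters (GetType, GetErrorMessage) exist on p.Response.

// drainResponses is a hypothetical helper: it reads responses produced by
// ShardData.Query until the stream ends or an error response arrives.
func drainResponses(response <-chan *p.Response) error {
	for r := range response {
		switch r.GetType() {
		case p.Response_ERROR:
			// the shard (or a remote server) reported a failure
			return fmt.Errorf("shard query failed: %s", r.GetErrorMessage())
		case p.Response_END_STREAM:
			// the shard finished streaming its results
			return nil
		default:
			// a data response; a real caller would hand the payload
			// to its own processor here
		}
	}
	return nil
}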