Example 1
func (self *Shard) executeQueryForSeries(querySpec *parser.QuerySpec, name string, columns []string, processor engine.Processor) error {
	if querySpec.IsSinglePointQuery() {
		log.Debug("Running single query for series %s", name)
		return self.executeSinglePointQuery(querySpec, name, columns, processor)
	}
	var pi *PointIterator
	var err error
	columns, pi, err = self.getPointIteratorForSeries(querySpec, name, columns)
	if err != nil {
		return err
	}
	defer pi.Close()

	query := querySpec.SelectQuery()
	aliases := query.GetTableAliases(name)

	seriesOutgoing := &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
	for pi.Valid() {
		p := pi.Point()
		seriesOutgoing.Points = append(seriesOutgoing.Points, p)
		if len(seriesOutgoing.Points) >= self.pointBatchSize {
			ok, err := yieldToProcessor(seriesOutgoing, processor, aliases)
			if !ok || err != nil {
				log.Debug("Stopping processing.")
				if err != nil {
					log.Error("Error while processing data: %v", err)
					return err
				}
				return nil
			}
			seriesOutgoing = &protocol.Series{Name: protocol.String(name), Fields: columns, Points: make([]*protocol.Point, 0, self.pointBatchSize)}
		}

		pi.Next()
	}

	if err := pi.Error(); err != nil {
		return err
	}

	// Yield remaining data
	if ok, err := yieldToProcessor(seriesOutgoing, processor, aliases); !ok || err != nil {
		log.Debug("Stopping processing remaining points...")
		if err != nil {
			log.Error("Error while processing data: %v", err)
			return err
		}
	}

	log.Debug("Finished running query %s", query.GetQueryString())
	return nil
}
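The flush-and-reset batching above is a reusable pattern: accumulate points into a slice preallocated to the batch size, hand each full batch to the processor, and flush whatever remains once the iterator is exhausted. A minimal sketch of the pattern in isolation (the function name and yield signature are illustrative, not part of the API above):

// batchPoints drains next() into batches of at most n points, passing each
// full batch to yield and flushing the final partial batch after the loop.
// Sketch only; it mirrors the structure of executeQueryForSeries above.
func batchPoints(n int, next func() (*protocol.Point, bool), yield func([]*protocol.Point) error) error {
	batch := make([]*protocol.Point, 0, n)
	for {
		p, ok := next()
		if !ok {
			break
		}
		batch = append(batch, p)
		if len(batch) >= n {
			if err := yield(batch); err != nil {
				return err
			}
			batch = make([]*protocol.Point, 0, n) // start a fresh batch
		}
	}
	return yield(batch) // yield remaining data
}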
Example 2
func (self *ClusterServer) heartbeat() {
	defer func() {
		self.heartbeatStarted = false
	}()

	for {
		// this chan is buffered and in the loop on purpose. This is so
		// that if reading a heartbeat times out, and the heartbeat then comes through
		// later, it will be dumped into this chan and not block the protobuf client reader.
		responseChan := make(chan *protocol.Response, 1)
		heartbeatRequest := &protocol.Request{
			Type:     &HEARTBEAT_TYPE,
			Database: protocol.String(""),
		}
		self.MakeRequest(heartbeatRequest, responseChan)
		err := self.getHeartbeatResponse(responseChan)
		if err != nil {
			self.handleHeartbeatError(err)
			continue
		}

		if !self.isUp {
			log.Warn("Heartbeat succeeded, marking server as up")
		}
		// reset the backoff and mark the server as up
		self.isUp = true
		self.Backoff = self.MinBackoff
		time.Sleep(self.HeartbeatInterval)
	}
}
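The comment inside the loop describes a standard Go idiom: a response channel with a buffer of one, created fresh on each iteration, lets a reply that arrives after a timeout be dropped into the channel without blocking the sender. A sketch of how a timed read over such a channel might look (the function name and error text are assumptions, not the actual getHeartbeatResponse implementation):

// waitForResponse reads one response or gives up after timeout. Because the
// channel is buffered with capacity 1, a late reply still has somewhere to
// go and never blocks the protobuf client's reader goroutine.
func waitForResponse(responseChan <-chan *protocol.Response, timeout time.Duration) error {
	select {
	case <-responseChan:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("heartbeat timed out after %s", timeout)
	}
}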
Example 3
func (_ *WalSuite) TestRecoverWithNonWriteRequests(c *C) {
	wal := newWal(c)
	requestType := protocol.Request_QUERY
	request := &protocol.Request{
		Type:     &requestType,
		Database: protocol.String("some_db"),
	}
	wal.AssignSequenceNumbersAndLog(request, &MockServer{id: 1})
	c.Assert(wal.Close(), IsNil)
	wal, err := NewWAL(wal.config)
	c.Assert(err, IsNil)
	wal.SetServerId(1)
}
Example 4
func (self *ProtobufClientSuite) BenchmarkSingle(c *gocheck.C) {
	var HEARTBEAT_TYPE = protocol.Request_HEARTBEAT
	prs := FakeHeartbeatServer()
	client := NewProtobufClient(prs.Listener.Addr().String(), time.Second)
	client.Connect()
	c.ResetTimer()
	for i := 0; i < c.N; i++ {
		responseChan := make(chan *protocol.Response, 1)
		heartbeatRequest := &protocol.Request{
			Type:     &HEARTBEAT_TYPE,
			Database: protocol.String(""),
		}
		rcw := cluster.NewResponseChannelWrapper(responseChan)
		client.MakeRequest(heartbeatRequest, rcw)
		<-responseChan
	}
}
Example 5
func (self *ProtobufServer) sendErrorResponse(conn net.Conn, message string) error {
	response := &protocol.Response{
		Type:         protocol.Response_ERROR.Enum(),
		ErrorMessage: protocol.String(message),
	}
	data, err := response.Encode()
	if err != nil {
		return err
	}

	buff := bytes.NewBuffer(make([]byte, 0, len(data)+4))
	err = binary.Write(buff, binary.LittleEndian, uint32(len(data)))

	if err != nil {
		return err
	}

	_, err = conn.Write(append(buff.Bytes(), data...))
	return err
}
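sendErrorResponse frames each message as a 4-byte little-endian length prefix followed by the encoded payload. The matching read side can be sketched without assuming anything about the decoder (the function name is illustrative):

// readMessage reverses the framing above: read the little-endian uint32
// length, then exactly that many payload bytes.
func readMessage(conn net.Conn) ([]byte, error) {
	var length uint32
	if err := binary.Read(conn, binary.LittleEndian, &length); err != nil {
		return nil, err
	}
	data := make([]byte, length)
	if _, err := io.ReadFull(conn, data); err != nil {
		return nil, err
	}
	return data, nil
}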
Example 6
func (self *ProtobufRequestHandler) handleWrites(request *protocol.Request, conn net.Conn) {
	shard := self.clusterConfig.GetLocalShardById(*request.ShardId)
	log.Debug("HANDLE: (%d):%d:%v", self.clusterConfig.LocalServer.Id, request.GetId(), shard)
	err := shard.WriteLocalOnly(request)
	var response *protocol.Response
	if err != nil {
		log.Error("ProtobufRequestHandler: error writing local shard: %s", err)
		response = &protocol.Response{
			RequestId:    request.Id,
			Type:         protocol.Response_ERROR.Enum(),
			ErrorMessage: protocol.String(err.Error()),
		}
	} else {
		response = &protocol.Response{
			RequestId: request.Id,
			Type:      protocol.Response_END_STREAM.Enum(),
		}
	}
	if err := self.WriteResponse(conn, response); err != nil {
		log.Error("ProtobufRequestHandler: error writing local shard: %s", err)
	}
}
Example 7
func (self *TrieTestSuite) TestTrie(c *C) {
	trie := NewTrie(2, 1)

	firstValue := []*protocol.FieldValue{
		{StringValue: protocol.String("some_value")},
		{Int64Value: protocol.Int64(1)},
	}
	firstNode := trie.GetNode(firstValue)
	c.Assert(firstNode, NotNil)
	c.Assert(trie.GetNode(firstValue), DeepEquals, firstNode)
	c.Assert(trie.CountLeafNodes(), Equals, 1)

	secondValue := []*protocol.FieldValue{
		{StringValue: protocol.String("some_value")},
		{Int64Value: protocol.Int64(2)},
	}
	secondNode := trie.GetNode(secondValue)
	c.Assert(secondNode, NotNil)
	c.Assert(trie.GetNode(secondValue), DeepEquals, secondNode)
	c.Assert(trie.CountLeafNodes(), Equals, 2)

	thirdValue := []*protocol.FieldValue{
		{StringValue: protocol.String("another_value")},
		{Int64Value: protocol.Int64(1)},
	}
	thirdNode := trie.GetNode(thirdValue)
	c.Assert(thirdNode, NotNil)
	c.Assert(trie.GetNode(thirdValue), DeepEquals, thirdNode)
	c.Assert(trie.CountLeafNodes(), Equals, 3)

	nodes := 0
	orderValues := [][]*protocol.FieldValue{thirdValue, firstValue, secondValue}
	c.Assert(trie.Traverse(func(v []*protocol.FieldValue, _ *Node) error {
		c.Assert(v, DeepEquals, orderValues[nodes])
		nodes++
		return nil
	}), IsNil)
	c.Assert(nodes, Equals, trie.CountLeafNodes())

	// make sure TraverseLevel works as expected
	ns := []*Node{}
	c.Assert(trie.TraverseLevel(0, func(_ []*protocol.FieldValue, n *Node) error {
		ns = append(ns, n)
		return nil
	}), IsNil)
	c.Assert(ns, HasLen, 1) // should return the root node only
	c.Assert(ns[0].value, IsNil)

	ns = []*Node{}
	c.Assert(trie.TraverseLevel(1, func(_ []*protocol.FieldValue, n *Node) error {
		ns = append(ns, n)
		return nil
	}), IsNil)
	c.Assert(ns, HasLen, 2) // should return the two first-level nodes
	c.Assert(ns[0].value.GetStringValue(), Equals, "another_value")
	c.Assert(ns[1].value.GetStringValue(), Equals, "some_value")

	c.Assert(ns[0].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(1)}).isLeaf, Equals, true)
	c.Assert(ns[0].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(2)}), IsNil)

	c.Assert(ns[1].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(1)}).isLeaf, Equals, true)
	c.Assert(ns[1].GetChildNode(&protocol.FieldValue{Int64Value: protocol.Int64(2)}).isLeaf, Equals, true)
}
Example 8
func (self *ShardData) Query(querySpec *parser.QuerySpec, response chan<- *p.Response) {
	log.Debug("QUERY: shard %d, query '%s'", self.Id(), querySpec.GetQueryStringWithTimeCondition())
	defer common.RecoverFunc(querySpec.Database(), querySpec.GetQueryStringWithTimeCondition(), func(err interface{}) {
		response <- &p.Response{
			Type:         p.Response_ERROR.Enum(),
			ErrorMessage: p.String(fmt.Sprintf("%s", err)),
		}
	})

	// Delete and drop queries must be sent to every server in the shard, not
	// just the local server or a single remote one. This flag should only be
	// set to true on the server that receives the initial query.
	if querySpec.RunAgainstAllServersInShard {
		if querySpec.IsDeleteFromSeriesQuery() {
			self.logAndHandleDeleteQuery(querySpec, response)
		} else if querySpec.IsDropSeriesQuery() {
			self.logAndHandleDropSeriesQuery(querySpec, response)
		}
	}

	if self.IsLocal {
		var processor engine.Processor = NewResponseChannelProcessor(NewResponseChannelWrapper(response))
		var err error

		processor = NewShardIdInserterProcessor(self.Id(), processor)

		processor, err = self.getProcessor(querySpec, processor)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while creating engine: %s", err)
			return
		}
		shard, err := self.store.GetOrCreateShard(self.id)
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			log.Error("Error while getting shards: %s", err)
			return
		}
		defer self.store.ReturnShard(self.id)

		log.Debug("Processor chain:  %s\n", engine.ProcessorChain(processor))

		err = shard.Query(querySpec, processor)
		// calling Close() here on error would mask the original error
		if err != nil {
			response <- &p.Response{
				Type:         p.Response_ERROR.Enum(),
				ErrorMessage: p.String(err.Error()),
			}
			return
		}
		processor.Close()
		response <- &p.Response{Type: p.Response_END_STREAM.Enum()}
		return
	}

	if server := self.randomHealthyServer(); server != nil {
		log.Debug("Querying server %d for shard %d", server.GetId(), self.Id())
		request := self.createRequest(querySpec)
		server.MakeRequest(request, response)
		return
	}

	message := fmt.Sprintf("No servers up to query shard %d", self.id)
	response <- &p.Response{
		Type:         p.Response_ERROR.Enum(),
		ErrorMessage: &message,
	}
	log.Error(message)
}
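Every path through Query terminates the stream with either Response_END_STREAM or Response_ERROR, so a caller can drain the channel until it sees one of the two. A minimal consumer sketch (the function name is illustrative; real callers also forward the intermediate data responses somewhere):

// drainResponses consumes a shard query's response stream until the
// terminal END_STREAM or ERROR message that every code path above sends.
func drainResponses(response <-chan *p.Response) error {
	for r := range response {
		switch r.GetType() {
		case p.Response_ERROR:
			return fmt.Errorf("query failed: %s", r.GetErrorMessage())
		case p.Response_END_STREAM:
			return nil
		}
	}
	return nil
}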