Example #1
func (prod *Kafka) send(msg core.Message) {
	// If we have not yet connected or the connection dropped: connect.
	if prod.client == nil || prod.client.Closed() {
		if prod.producer != nil {
			prod.producer.Close()
			prod.producer = nil
		}

		var err error
		prod.client, err = kafka.NewClient(prod.servers, prod.config)
		if err != nil {
			Log.Error.Print("Kafka client error:", err)
			return // ### return, connection failed ###
		}
	}

	// Make sure we have a producer up and running
	if prod.producer == nil {
		var err error
		prod.producer, err = kafka.NewAsyncProducerFromClient(prod.client)
		if err != nil {
			Log.Error.Print("Kafka producer error:", err)
			return // ### return, producer creation failed ###
		}
	}

	// Both the client and the producer are up at this point
	msg.Data, msg.StreamID = prod.ProducerBase.Format(msg)

	// Map the stream to a topic: explicit mapping first, then the wildcard
	// mapping, then fall back to the stream name itself.
	topic, topicMapped := prod.topic[msg.StreamID]
	if !topicMapped {
		topic, topicMapped = prod.topic[core.WildcardStreamID]
		if !topicMapped {
			topic = core.StreamTypes.GetStreamName(msg.StreamID)
		}
	}

	// Hand the message to the async producer
	prod.producer.Input() <- &kafka.ProducerMessage{
		Topic: topic,
		Key:   nil,
		Value: kafka.ByteEncoder(msg.Data),
	}

	// Drain a pending producer error without blocking
	select {
	case err := <-prod.producer.Errors():
		Log.Error.Print("Kafka producer error:", err)
	default:
	}
}
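For context, here is a minimal standalone sketch of the same connect-then-send flow, assuming the Shopify/sarama library (which the kafka alias above appears to refer to); the broker address and topic name are hypothetical:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Errors = true // surface failed sends on Errors()

	// Connect once and reuse the client for the producer, as above.
	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal("Kafka client error: ", err)
	}
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		log.Fatal("Kafka producer error: ", err)
	}
	defer producer.Close()

	// Fire-and-forget: the async producer accepts messages on a channel.
	producer.Input() <- &sarama.ProducerMessage{
		Topic: "test-topic",
		Value: sarama.ByteEncoder([]byte("hello")),
	}

	// Non-blocking error check, mirroring the select/default above.
	select {
	case err := <-producer.Errors():
		log.Println("Kafka producer error:", err)
	default:
	}
}

Because the producer is asynchronous, the non-blocking select can only surface errors from earlier sends; a long-running producer would typically drain Errors() in its own goroutine instead.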
Example #2
// Start one consumer per partition as a goroutine
func (cons *Kafka) startConsumers() error {
	var err error

	cons.client, err = kafka.NewClient(cons.servers, cons.config)
	if err != nil {
		return err
	}

	cons.consumer, err = kafka.NewConsumerFromClient(cons.client)
	if err != nil {
		return err
	}

	partitions, err := cons.client.Partitions(cons.topic)
	if err != nil {
		return err
	}

	// Make sure every partition has an offset and track the highest partition ID
	for _, partition := range partitions {
		if _, mapped := cons.offsets[partition]; !mapped {
			cons.offsets[partition] = cons.defaultOffset
		}
		if partition > cons.MaxPartitionID {
			cons.MaxPartitionID = partition
		}
	}

	for _, partition := range partitions {
		partition := partition // capture the loop variable for the goroutine

		go func() {
			defer shared.RecoverShutdown()
			cons.readFromPartition(partition)
		}()
	}

	return nil
}
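The readFromPartition method is not shown. A minimal sketch of how the same per-partition fan-out could be wired with sarama directly, assuming a hypothetical localhost broker and test-topic topic, and sarama.OffsetNewest in place of the configured offset map:

package main

import (
	"log"
	"sync"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	partitions, err := client.Partitions("test-topic")
	if err != nil {
		log.Fatal(err)
	}

	// One goroutine per partition, as in startConsumers above.
	var wg sync.WaitGroup
	for _, partition := range partitions {
		partition := partition // capture the loop variable for the goroutine
		wg.Add(1)
		go func() {
			defer wg.Done()
			pc, err := consumer.ConsumePartition("test-topic", partition, sarama.OffsetNewest)
			if err != nil {
				log.Println("consume error:", err)
				return
			}
			defer pc.Close()
			for msg := range pc.Messages() {
				log.Printf("partition %d offset %d: %s", msg.Partition, msg.Offset, msg.Value)
			}
		}()
	}
	wg.Wait()
}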
Example #3
func (z *zipkin) run() {
	config := sarama.NewConfig()
	// sarama's SyncProducer requires Producer.Return.Successes to be enabled.
	config.Producer.Return.Successes = true

	c, err := sarama.NewClient(z.opts.Collectors, config)
	if err != nil {
		return // cannot reach the collectors, give up
	}

	p, err := sarama.NewSyncProducerFromClient(c)
	if err != nil {
		return
	}
	defer p.Close()

	// Flush a batch when it reaches BatchSize or when BatchInterval elapses.
	t := time.NewTicker(z.opts.BatchInterval)
	defer t.Stop()

	var buf []*trace.Span

	for {
		select {
		case s := <-z.spans:
			buf = append(buf, s)
			if len(buf) >= z.opts.BatchSize {
				go z.send(buf, p)
				buf = nil
			}
		case <-t.C:
			// Interval elapsed: flush whatever has accumulated
			if len(buf) > 0 {
				go z.send(buf, p)
				buf = nil
			}
		case <-z.exit:
			return
		}
	}
}
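z.send and trace.Span are defined elsewhere. A hedged sketch of what a batch publisher like z.send might look like, using sarama's SendMessages for the whole batch; the span type, topic parameter, and JSON encoding here are placeholders (a real Zipkin collector would expect Zipkin's own wire encoding):

package zipkin

import (
	"encoding/json"
	"log"

	"github.com/Shopify/sarama"
)

// span is a hypothetical stand-in for *trace.Span, which is not shown above.
type span struct {
	Name     string `json:"name"`
	Duration int64  `json:"duration"`
}

// send publishes one batch the way z.send(buf, p) plausibly does.
func send(batch []*span, p sarama.SyncProducer, topic string) {
	msgs := make([]*sarama.ProducerMessage, 0, len(batch))
	for _, s := range batch {
		data, err := json.Marshal(s)
		if err != nil {
			continue // skip spans that fail to encode
		}
		msgs = append(msgs, &sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.ByteEncoder(data),
		})
	}
	// SendMessages blocks until the whole batch is acknowledged or fails.
	if err := p.SendMessages(msgs); err != nil {
		log.Println("zipkin send error:", err)
	}
}

sarama producers are safe for concurrent use, which is why run() can hand each batch to a fresh goroutine without further synchronization.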