Example #1
func Publish(input chan *FileEvent, source string, ctrl chan bool) {
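	// Producer configuration: wait for the local broker's ack only, compress with
	// snappy, and batch sends by time (500 ms), message count, and byte size.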
	clientConfig := sarama.NewConfig()
	clientConfig.Producer.RequiredAcks = sarama.WaitForLocal
	clientConfig.Producer.Compression = sarama.CompressionSnappy
	clientConfig.Producer.Flush.Frequency = 500 * time.Millisecond
	clientConfig.Producer.Flush.Messages = 200
	clientConfig.Producer.Flush.MaxMessages = 200
	clientConfig.Producer.Flush.Bytes = 16384
	clientConfig.Producer.Return.Successes = true
	clientConfig.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	clientConfig.ChannelBufferSize = kafkabuffer

	// brokerList (e.g. []string{"127.0.0.1:9092"}), kafkabuffer, kafkaTopic,
	// topicmap and hashKey are defined elsewhere in the package.
	var producer sarama.AsyncProducer
	var err error
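	// Keep retrying until a producer can be created against the configured brokers.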
	for {
		producer, err = sarama.NewAsyncProducer(brokerList, clientConfig)
		if err != nil {
			log.Error("Publish: Failed to start Sarama producer: ", err)
			log.Info("waiting....")
			time.Sleep(1 * time.Second)
		} else {
			break
		}
	}

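	// Flush any buffered messages and shut the producer down when Publish returns.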
	defer func() {
		if err := producer.Close(); err != nil {
			log.Error("Failed to shutdown producer cleanly", err)
		}
	}()

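	// The registrar is handed the producer's error and success channels.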
	registrar := &Registrar{source: source, publishCtrl: ctrl}
	go registrar.RegistrarDo(producer.Errors(), producer.Successes())

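	// Use the default topic unless topicmap maps this file's base name to another one.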
	topic := kafkaTopic
	baseName := filepath.Base(source)
	if len(topicmap) > 0 {
		tmpTopic := genTopic(baseName, topicmap)
		if tmpTopic != "" {
			topic = tmpTopic
		}
	}

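	// Forward each event to Kafka, keyed by its offset and carrying the event as metadata.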
	key := hashKey
	for event := range input {
		log.Debugf("%v, %v, %v, %v", *event.Source, *event.Text, event.Line, event.Offset)
		key = strconv.FormatInt(event.Offset, 10)
		producer.Input() <- &sarama.ProducerMessage{
			Topic:    topic,
			Key:      sarama.StringEncoder(key),
			Value:    sarama.StringEncoder(*event.Text),
			Metadata: event,
		}
	}

}
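
Publish reads from a channel of FileEvent values produced elsewhere in the harvester. A rough sketch of how it might be driven, assuming a FileEvent shape inferred from the fields used above (Source and Text as *string, Offset as int64; the Line type is a guess) and the package-level brokerList, kafkaTopic, kafkabuffer, topicmap and hashKey already defined:

// Assumed shape of FileEvent, inferred from the fields Publish dereferences;
// the real definition lives elsewhere in the project.
type FileEvent struct {
	Source *string
	Text   *string
	Line   uint64 // assumption: the exact type is not visible in this snippet
	Offset int64
}

func runPublisher() {
	events := make(chan *FileEvent, 16)
	ctrl := make(chan bool)

	go Publish(events, "/var/log/app.log", ctrl)

	src, text := "/var/log/app.log", "hello kafka"
	events <- &FileEvent{Source: &src, Text: &text, Line: 1, Offset: 0}

	// Closing the input channel ends the range loop in Publish, which then
	// flushes and closes the producer via the deferred Close.
	close(events)
}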
Example #2
func expectationProducer(p sarama.AsyncProducer, expectations chan<- *sarama.ProducerMessage, wg *sync.WaitGroup) {
	defer wg.Done()

	var producerWg sync.WaitGroup

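	// Forward every acknowledged message to the expectations channel.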
	producerWg.Add(1)
	go func() {
		defer producerWg.Done()
		for msg := range p.Successes() {
			stats.LogProduced(msg)
			expectations <- msg
		}
	}()

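	// Log any messages that failed to be produced.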
	producerWg.Add(1)
	go func() {
		defer producerWg.Done()
		for err := range p.Errors() {
			logger.Println("Failed to produce message:", err)
		}
	}()

	go monitor()
	logger.Printf("Producing %d messages...\n", *batchSize)

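	// Enqueue *batchSize messages, keyed by their index, unless a shutdown is
	// signalled first; optionally pause *sleep between sends.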
ProducerLoop:
	for i := 0; i < *batchSize; i++ {
		msg := &sarama.ProducerMessage{
			Topic:    *topic,
			Key:      sarama.StringEncoder(fmt.Sprintf("%d", i)),
			Value:    nil,
			Metadata: &MessageMetadata{Enqueued: time.Now()},
		}

		select {
		case <-shutdown:
			logger.Println("Early shutdown initiated...")
			break ProducerLoop
		case p.Input() <- msg:
			stats.LogEnqueued(msg)
		}

		if *sleep > 0 {
			time.Sleep(time.Duration(*sleep))
		}
	}

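	// AsyncClose flushes in-flight messages and then closes the Successes and
	// Errors channels; once both reader goroutines have exited it is safe to
	// close the expectations channel.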
	p.AsyncClose()
	producerWg.Wait()
	close(expectations)
}
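
For the success loop above to receive anything, the producer has to be created with Producer.Return.Successes = true (the error loop relies on Producer.Return.Errors, which sarama's NewConfig enables by default). A rough sketch of how a caller in the surrounding tool might wire this up; the broker address, channel buffer size and the verifier loop are assumptions, and logger is taken to be the tool's own *log.Logger:

func runExpectationProducer() {
	cfg := sarama.NewConfig()
	cfg.Producer.Return.Successes = true // required for the Successes() loop above

	p, err := sarama.NewAsyncProducer([]string{"127.0.0.1:9092"}, cfg)
	if err != nil {
		logger.Fatalln("Failed to start producer:", err)
	}

	expectations := make(chan *sarama.ProducerMessage, 1024)

	var wg sync.WaitGroup
	wg.Add(1)
	go expectationProducer(p, expectations, &wg)

	// Verifier side: expectations is closed by expectationProducer once every
	// in-flight message has been acknowledged or reported as failed.
	for msg := range expectations {
		logger.Printf("acknowledged: partition=%d offset=%d\n", msg.Partition, msg.Offset)
	}
	wg.Wait()
}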