Esempio n. 1
0
// initProducer creates an async Kafka producer for the brokers listed in
// moduleConfig.Kafka.BrokerList, retrying the connection up to
// Kafka.MaxRetry times with a 5-second pause between attempts.
//
// It also starts a goroutine that drains the producer's Errors() channel,
// which is mandatory for sarama async producers (they deadlock otherwise).
//
// Returns the wrapped *Producer, or the last connection error if every
// attempt failed.
func initProducer(moduleConfig *Config) (*Producer, error) {
	fmt.Println("[INFO] initProducer called")
	brokerList := moduleConfig.Kafka.BrokerList
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas to ack
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Flush.Frequency = 500 * time.Millisecond

	var producer sarama.AsyncProducer
	var err error
	for currConnAttempt := 0; currConnAttempt < moduleConfig.Kafka.MaxRetry; currConnAttempt++ {
		producer, err = sarama.NewAsyncProducer(brokerList, config)
		if err == nil {
			break
		}
		fmt.Println("[INFO] Connection attempt failed (", (currConnAttempt + 1), "/", moduleConfig.Kafka.MaxRetry, ")")
		// Only wait when another attempt will follow; sleeping after the
		// final failure just delays the error return by 5 seconds.
		if currConnAttempt+1 < moduleConfig.Kafka.MaxRetry {
			<-time.After(time.Second * 5)
		}
	}

	if err != nil {
		fmt.Println("[ERROR] Unable to setup kafka producer", err)
		return nil, err
	}

	// You must read from the Errors() channel or the producer will deadlock.
	go func() {
		for err := range producer.Errors() {
			log.Println("[ERROR] Kafka producer Error: ", err)
		}
	}()

	fmt.Println("[INFO] kafka producer initialized successfully")
	return &Producer{producer: producer, id: CreatedProducersLength()}, nil
}
Esempio n. 2
0
// Publish consumes FileEvents from input and forwards each one to Kafka via
// an async producer, blocking until the input channel is closed.
//
// It retries producer creation forever (1s between attempts), hands the
// producer's Errors()/Successes() channels to a Registrar goroutine for
// acknowledgement tracking, and derives the topic from the source file's
// base name via topicmap when a mapping exists (falling back to kafkaTopic).
// Each message is keyed by the event's file offset.
func Publish(input chan *FileEvent, source string, ctrl chan bool) {
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = sarama.WaitForLocal
	cfg.Producer.Compression = sarama.CompressionSnappy
	cfg.Producer.Flush.Frequency = 500 * time.Millisecond
	cfg.Producer.Flush.Messages = 200
	cfg.Producer.Flush.MaxMessages = 200
	cfg.Producer.Flush.Bytes = 16384
	cfg.Producer.Return.Successes = true
	cfg.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	cfg.ChannelBufferSize = kafkabuffer

	// brokerList is package-level; e.g. []string{"127.0.0.1:9092"}
	var (
		producer sarama.AsyncProducer
		err      error
	)
	for {
		if producer, err = sarama.NewAsyncProducer(brokerList, cfg); err == nil {
			break
		}
		log.Error("Publish: Failed to start Sarama producer: ", err)
		log.Info("waiting....")
		time.Sleep(1 * time.Second)
	}

	defer func() {
		if closeErr := producer.Close(); closeErr != nil {
			log.Error("Failed to shutdown producer cleanly", closeErr)
		}
	}()

	// The registrar consumes acks/errors so the async producer never stalls.
	registrar := &Registrar{source: source, publishCtrl: ctrl}
	go registrar.RegistrarDo(producer.Errors(), producer.Successes())

	// Resolve the destination topic, preferring a per-file mapping.
	topic := kafkaTopic
	if len(topicmap) > 0 {
		if mapped := genTopic(filepath.Base(source), topicmap); mapped != "" {
			topic = mapped
		}
	}

	key := hashKey
	for ev := range input {
		log.Debugf("%v, %v, %v, %v", *ev.Source, *ev.Text, ev.Line, ev.Offset)
		key = strconv.FormatInt(ev.Offset, 10)
		producer.Input() <- &sarama.ProducerMessage{
			Topic:    topic,
			Key:      sarama.StringEncoder(key),
			Value:    sarama.StringEncoder(*ev.Text),
			Metadata: ev,
		}
	}

}
Esempio n. 3
0
// expectationProducer enqueues *batchSize messages into p, forwarding every
// acknowledged message to the expectations channel, which it closes after
// the producer has fully drained.
//
// Two helper goroutines consume the producer's Successes() and Errors()
// channels (required for sarama async producers); a shutdown signal aborts
// the produce loop early. The caller's WaitGroup is released on return.
func expectationProducer(p sarama.AsyncProducer, expectations chan<- *sarama.ProducerMessage, wg *sync.WaitGroup) {
	defer wg.Done()

	var ackWg sync.WaitGroup
	ackWg.Add(2)

	// Record and forward every successfully produced message.
	go func() {
		defer ackWg.Done()
		for acked := range p.Successes() {
			stats.LogProduced(acked)
			expectations <- acked
		}
	}()

	// Log production failures so they are not silently dropped.
	go func() {
		defer ackWg.Done()
		for produceErr := range p.Errors() {
			logger.Println("Failed to produce message:", produceErr)
		}
	}()

	go monitor()
	logger.Printf("Producing %d messages...\n", *batchSize)

ProducerLoop:
	for seq := 0; seq < *batchSize; seq++ {
		message := &sarama.ProducerMessage{
			Topic:    *topic,
			Key:      sarama.StringEncoder(fmt.Sprintf("%d", seq)),
			Value:    nil,
			Metadata: &MessageMetadata{Enqueued: time.Now()},
		}

		select {
		case <-shutdown:
			logger.Println("Early shutdown initiated...")
			break ProducerLoop
		case p.Input() <- message:
			stats.LogEnqueued(message)
		}

		if *sleep > 0 {
			time.Sleep(time.Duration(*sleep))
		}
	}

	// Drain in-flight messages, wait for both ack readers, then signal
	// downstream that no more expectations will arrive.
	p.AsyncClose()
	ackWg.Wait()
	close(expectations)
}