Example 1
func (k *Kafka) Connect() error {
	config := sarama.NewConfig()

	config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks)
	config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec)
	config.Producer.Retry.Max = k.MaxRetry

	// Legacy SSL config support
	if k.Certificate != "" {
		k.SSLCert = k.Certificate
		k.SSLCA = k.CA
		k.SSLKey = k.Key
	}

	tlsConfig, err := internal.GetTLSConfig(
		k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
	if err != nil {
		return err
	}

	if tlsConfig != nil {
		config.Net.TLS.Config = tlsConfig
		config.Net.TLS.Enable = true
	}

	producer, err := sarama.NewSyncProducer(k.Brokers, config)
	if err != nil {
		return err
	}
	k.producer = producer
	return nil
}
Example 2
func NewProducer(brokerAddrs []string, conf *sarama.Config) (*Producer, error) {
	producer, err := sarama.NewSyncProducer(brokerAddrs, conf)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &Producer{producer}, nil
}
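The Producer type itself is not shown in this snippet. A minimal definition consistent with the positional literal &Producer{producer} could look like the following; this is an assumption, not the project's actual declaration.

// Assumed wrapper: the single field holds the sarama.SyncProducer
// created in NewProducer above.
type Producer struct {
	sarama.SyncProducer
}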
Example 3
func main() {
	var conf Conf
	err := envdecode.Decode(&conf)
	if err != nil {
		log.Fatal(err)
	}

	stripe.Key = conf.StripeKey
	//stripe.LogLevel = 1 // errors only

	producer, err := sarama.NewSyncProducer(strings.Split(conf.SeedBroker, ","), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			log.Fatal(err)
		}
	}()

	log.Printf("Tailing the log")
	err = tailLog(producer, conf.KafkaTopic)
	if err != nil {
		log.Fatal(err)
	}
}
Example 4
func (k *Kafka) Connect() error {
	config := sarama.NewConfig()
	// Wait for all in-sync replicas to ack the message
	config.Producer.RequiredAcks = sarama.WaitForAll
	// Retry up to 10 times to produce the message
	config.Producer.Retry.Max = 10

	// Legacy SSL config support
	if k.Certificate != "" {
		k.SSLCert = k.Certificate
		k.SSLCA = k.CA
		k.SSLKey = k.Key
	}

	tlsConfig, err := internal.GetTLSConfig(
		k.SSLCert, k.SSLKey, k.SSLCA, k.InsecureSkipVerify)
	if err != nil {
		return err
	}

	if tlsConfig != nil {
		config.Net.TLS.Config = tlsConfig
		config.Net.TLS.Enable = true
	}

	producer, err := sarama.NewSyncProducer(k.Brokers, config)
	if err != nil {
		return err
	}
	k.producer = producer
	return nil
}
Example 5
func (k *KafkaProducer) Start(host string, port int) {

	connection := host + ":" + strconv.Itoa(port)

	k.Log.Notice("Connecting to Kafka on " + connection + "...")

	config := sarama.NewConfig()
	config.Metadata.Retry.Backoff = 10 * time.Second

	// Set producer config:
	// don't compress the messages
	config.Producer.Compression = sarama.CompressionNone

	// We are just streaming metrics, so don't wait for any Kafka acks.
	config.Producer.RequiredAcks = sarama.NoResponse

	producer, err := sarama.NewSyncProducer([]string{connection}, config)
	if err != nil {
		k.Log.Error("Error connecting to Kafka: ", err.Error())
		return
	}
	k.Log.Notice("Connection to Kafka successful")

	go k.produce(producer)

}
Example 6
func InitKafka(kafkaAddrs []string) (err error) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewHashPartitioner
	producer, err = sarama.NewSyncProducer(kafkaAddrs, config)
	return
}
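Because the config installs sarama.NewHashPartitioner, messages that carry the same Key are routed to the same partition. A small illustrative helper using the package-level producer assigned above; the variable declaration and helper name are assumptions, not part of the original code.

// Assumed package-level handle populated by InitKafka.
var producer sarama.SyncProducer

// sendKeyed shows the effect of the hash partitioner: messages that
// share a key always land on the same partition.
func sendKeyed(topic, key, value string) error {
	_, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.StringEncoder(value),
	})
	return err
}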
Example 7
func pubKafkaLoop(seq int) {
	cf := sarama.NewConfig()
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Partitioner = sarama.NewHashPartitioner
	cf.Producer.Timeout = time.Second
	//cf.Producer.Compression = sarama.CompressionSnappy
	cf.Producer.Retry.Max = 3
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cf)
	if err != nil {
		stress.IncCounter("fail", 1)
		log.Println(err)
		return
	}

	defer producer.Close()
	msg := strings.Repeat("X", sz)
	for i := 0; i < loops; i++ {
		_, _, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.StringEncoder(msg),
		})
		if err == nil {
			stress.IncCounter("ok", 1)
		} else {
			stress.IncCounter("fail", 1)
		}
	}

}
Example 8
func main() {
	flag.Parse()
	if *host == "" || *topic == "" || *logfile == "" {
		fmt.Printf("pararm error,host=%s,topic=%s,logfile=%s\n", *host, *topic, *logfile)
		os.Exit(0)
	}

	hosts := strings.Split(*host, ",")
	producer, err := sarama.NewSyncProducer(hosts, nil)
	if err != nil {
		fmt.Printf("create kafka syncproducer fail. %+v\n", err)
		os.Exit(-1)
	}
	defer producer.Close()

	file, err1 := os.Open(*logfile)
	if err1 != nil {
		fmt.Printf("open logfile %s fail. %+v\n", *logfile, err1)
		os.Exit(-2)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		msg := &sarama.ProducerMessage{Topic: *topic, Value: sarama.StringEncoder(scanner.Text())}
		_, _, err := producer.SendMessage(msg)
		if err != nil {
			fmt.Printf("FAILED to send message: %s\n", err)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
Example 9
func newProducer(brokers []string, kafkaVersion sarama.KafkaVersion, retryOptions config.Retry) Producer {
	var p sarama.SyncProducer
	var err error
	brokerConfig := newBrokerConfig(kafkaVersion, rawPartition)

	repeatTick := time.NewTicker(retryOptions.Period)
	panicTick := time.NewTicker(retryOptions.Stop)
	defer repeatTick.Stop()
	defer panicTick.Stop()

loop:
	for {
		select {
		case <-panicTick.C:
			panic(fmt.Errorf("Failed to create Kafka producer: %v", err))
		case <-repeatTick.C:
			logger.Debug("Connecting to Kafka cluster:", brokers)
			p, err = sarama.NewSyncProducer(brokers, brokerConfig)
			if err == nil {
				break loop
			}
		}
	}

	logger.Debug("Connected to the Kafka cluster")
	return &producerImpl{producer: p}
}
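The loop above retries the connection every retryOptions.Period and panics once retryOptions.Stop elapses without a successful connection. The config.Retry type is not included in the snippet; a shape consistent with how it is used here would be (an assumption):

// Assumed shape of the retry options consumed by newProducer.
type Retry struct {
	Period time.Duration // how often to retry the connection
	Stop   time.Duration // how long to keep trying before panicking
}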
Example 10
func NewServer() Server {
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatalln(err)
	}
	return Server{producer}
}
Example 11
func newDataCollector(brokerList []string) sarama.SyncProducer {

	// For the data collector, we are looking for strong consistency semantics.
	// Because we don't change the flush settings, sarama will try to produce messages
	// as fast as possible to keep latency low.
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
	tlsConfig := createTlsConfiguration()
	if tlsConfig != nil {
		config.Net.TLS.Config = tlsConfig
		config.Net.TLS.Enable = true
	}

	// On the broker side, you may want to change the following settings to get
	// stronger consistency guarantees:
	// - For your broker, set `unclean.leader.election.enable` to false
	// - For the topic, you could increase `min.insync.replicas`.

	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	return producer
}
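A version note on the pattern above: recent sarama releases refuse to build a SyncProducer unless Producer.Return.Successes is set to true (Producer.Return.Errors already defaults to true), while older releases accepted configs like this one. A hedged sketch of the same settings with that flag added; the function name is illustrative:

// newStrictConfig mirrors the settings above and additionally sets
// Producer.Return.Successes, which newer sarama versions require
// before sarama.NewSyncProducer will accept the config.
func newStrictConfig() *sarama.Config {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 10
	config.Producer.Return.Successes = true
	return config
}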
Example 12
func main() {

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 5

	// brokers := []string{"192.168.59.103:9092"}
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewSyncProducer(brokers, config)
	if err != nil {
		// Should not reach here
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			// Should not reach here
			panic(err)
		}
	}()

	topic := "important"
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Something Cool"),
	}

	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		panic(err)
	}

	fmt.Printf("Message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset)
}
Example 13
func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(
		&sarama.ProducerMessage{
			Topic: testTopic,
			Value: sarama.StringEncoder(msg),
		})
	require.NoError(t, err)
	defer producer.Close()

	// Start the Kafka Consumer
	k := &Kafka{
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		Offset:         "oldest",
	}
	if err := k.Start(); err != nil {
		t.Fatal(err.Error())
	} else {
		defer k.Stop()
	}

	waitForPoint(k, t)

	// Verify that we can now gather the sent message
	var acc testutil.Accumulator
	// Sanity check
	assert.Equal(t, 0, len(acc.Points), "There should not be any points")

	// Gather points
	err = k.Gather(&acc)
	require.NoError(t, err)
	if len(acc.Points) == 1 {
		point := acc.Points[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}
Example 14
func (k *Kafka) Connect() error {
	producer, err := sarama.NewSyncProducer(k.Brokers, nil)
	if err != nil {
		return err
	}
	k.producer = producer
	return nil
}
Example 15
// NewProducer create a new kafka producer
func (k *Producer) NewProducer(brokerList, topics []string, config *sarama.Config) error {
	var err error
	k.producer, err = sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start producer:", err)
		return err
	}
	k.InitKafka(topics)
	return nil
}
Example 16
// NewProducer returns a new SyncProducer for the given broker address.
func NewProducer(broker string) (Producer, error) {
	kafkaConfig := sarama.NewConfig()
	kafkaConfig.Producer.Return.Successes = true

	kafkaProducer, err := sarama.NewSyncProducer([]string{broker}, kafkaConfig)
	if err != nil {
		return nil, err
	}

	return &producer{Producer: kafkaProducer}, nil
}
Example 17
func (c *KafkaClient) NewProducer(conf *Configure) error {
	hostports := conf.Kafka.Hosts
	producer, err := sarama.NewSyncProducer(hostports, nil)
	if err != nil {
		log.Printf("[kafka] create kafka syncproducer %+v error, %s\n", hostports, err)
	} else {
		log.Printf("[kafka] create kafka syncproducer %+v success.\n", hostports)
	}
	c.Porducer = producer
	return err
}
Example 18
// Refresh the producer (guarded by sync.Once, so it runs at most once)
func Refresh() {
	once.Do(func() {
		conf := sarama.NewConfig()
		conf.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas to ack
		conf.Producer.Retry.Max = 10                   // number of retries
		brokerList := config.KAFKA_BORKERS
		producer, err = sarama.NewSyncProducer(strings.Split(brokerList, ","), conf)
		if err != nil {
			logs.Log.Error("Kafka:%v\n", err)
		}
	})
}
Example 19
func main() {
	msg_number := flag.Int("number", 10000, "Number of messages")
	msg_size := flag.Int("size", 1000, "Message size")
	num_threads := flag.Int("threads", 20, "Number of threads (goroutines)")
	brokers := flag.String("brokers", "localhost:9093", "Comma separated kafka brokers list")
	topic := flag.String("topic", "my-topic", "Kafka topic to send messages to")
	flag.Parse()

	logger := log.New(os.Stdout, "producer ", log.Lmicroseconds)

	//logger.Println(broker)
	cfg := sarama.NewConfig()
	// Wait for all in-sync replicas to ack each message
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Flush.Frequency = 333 * time.Millisecond
	cfg.Producer.Flush.Messages = 1000
	cfg.Producer.Flush.MaxMessages = 3000
	producer, err := sarama.NewSyncProducer(strings.Split(*brokers, ","), cfg)
	if err != nil {
		logger.Fatalln(err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			logger.Fatalln(err)
		}
	}()

	c := make(chan int)
	logger.Println("Start")

	for i := 0; i < *num_threads; i++ {
		var chunk int
		if i == *num_threads-1 {
			chunk = *msg_number / *num_threads + (*msg_number % *num_threads)
		} else {
			chunk = *msg_number / *num_threads
		}
		go produce(producer, c, chunk, *msg_size, *topic, logger)
	}

	for i := 0; i < *num_threads; i++ {
		n := <-c
		logger.Printf("Thread%d has sent %d messages\n", i, n)
	}
	msg := &sarama.ProducerMessage{Topic: *topic, Value: sarama.StringEncoder("THE END")}
	_, _, err = producer.SendMessage(msg)
	if err != nil {
		logger.Printf("FAILED to send END message: %s\n", err)
	}

	logger.Println("Finish")

}
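The produce worker launched in each goroutine is not part of the snippet. A sketch consistent with the call go produce(producer, c, chunk, *msg_size, *topic, logger) and the later receive n := <-c might look like this; it is an assumption about the original body, not the original code.

// Illustrative worker: sends count messages of size bytes each and
// reports how many were produced successfully on channel c.
func produce(p sarama.SyncProducer, c chan int, count, size int, topic string, logger *log.Logger) {
	payload := sarama.StringEncoder(strings.Repeat("x", size))
	sent := 0
	for i := 0; i < count; i++ {
		if _, _, err := p.SendMessage(&sarama.ProducerMessage{Topic: topic, Value: payload}); err != nil {
			logger.Println("send failed:", err)
			continue
		}
		sent++
	}
	c <- sent
}

Note that with a SyncProducer the Flush settings only tune internal batching; every SendMessage call still blocks until the broker acknowledges that message.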
Example 20
func newSyncProducer(brokerList []string) sarama.SyncProducer {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 10

	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	return producer
}
Example 21
func main() {
	configFile := flag.String("c", "", "Config file")
	messageValue := flag.String("m", "", "Message")
	amount := flag.Int("a", 1, "Amount of messages")
	flag.Parse()

	if *configFile == "" || *messageValue == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	options, err := revolver.LoadOptions(*configFile)
	if err != nil {
		log.Fatalln(err)
	}

	sarama.Logger = logger

	var keyEncoder, valueEncoder sarama.Encoder

	keyEncoder = sarama.StringEncoder(time.Now().String())
	if *messageValue != "" {
		valueEncoder = sarama.StringEncoder(*messageValue)
	}

	config := sarama.NewConfig()
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	producer, err := sarama.NewSyncProducer(options.Brokers, config)
	if err != nil {
		logger.Fatalln("FAILED to open the producer:", err)
	}
	defer producer.Close()
	topic := options.KafkaTopics[0]

	for i := 0; i < *amount; i++ {
		partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Key:   keyEncoder,
			Value: valueEncoder,
		})

		if err != nil {
			logger.Println("FAILED to produce message:", err)

		} else {
			logger.Printf("msg: %d, topic=%s\tpartition=%d\toffset=%d\n", i, topic, partition, offset)
		}
	}
}
Example 22
func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}
	var zkPeers, brokerPeers []string

	if len(os.Getenv("ZOOKEEPER_PEERS")) == 0 {
		zkPeers = []string{"localhost:2181"}
	} else {
		zkPeers = strings.Split(os.Getenv("ZOOKEEPER_PEERS"), ",")
	}

	if len(os.Getenv("KAFKA_PEERS")) == 0 {
		brokerPeers = []string{"localhost:9092"}
	} else {
		brokerPeers = strings.Split(os.Getenv("KAFKA_PEERS"), ",")
	}

	k := &Kafka{
		ConsumerGroupName: "telegraf_test_consumers",
		Topic:             fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix()),
		ZookeeperPeers:    zkPeers,
	}

	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(&sarama.ProducerMessage{Topic: k.Topic, Value: sarama.StringEncoder(msg)})
	require.NoError(t, err)
	producer.Close()

	var acc testutil.Accumulator

	// Sanity check
	assert.Equal(t, 0, len(acc.Points), "there should not be any points")

	err = k.Gather(&acc)
	require.NoError(t, err)

	assert.Equal(t, 1, len(acc.Points), "there should be a single point")

	point := acc.Points[0]
	assert.Equal(t, "cpu_load_short", point.Measurement)
	assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Values)
	assert.Equal(t, map[string]string{
		"host":      "server01",
		"direction": "in",
		"region":    "us-west",
	}, point.Tags)
	assert.Equal(t, time.Unix(0, 1422568543702900257), point.Time)
}
Example 23
// NewKafkaSyncProducer creates a new Kafka synchronous producer
func NewKafkaSyncProducer(brokers string) (*Kafka, error) {
	brokerList := strings.Split(brokers, ",")
	log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
	// create Kafka client configuration
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
	// tlsConfig := createTlsConfiguration()
	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Println("Failed to start Sarama producer:", err)
		return nil, err
	}
	return &Kafka{sp: producer}, nil
}
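The Kafka type returned by this constructor is not defined in the snippet; a minimal shape consistent with &Kafka{sp: producer}, plus a Close helper, might be the following (assumptions, not the project's code):

// Assumed wrapper around the synchronous producer.
type Kafka struct {
	sp sarama.SyncProducer
}

// Close releases the underlying producer.
func (k *Kafka) Close() error {
	return k.sp.Close()
}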
Example 24
// Constructor
func NewProducer(brokers []string) *Producer {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 10

	syncProducer, err := sarama.NewSyncProducer(brokers, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	return &Producer{
		syncProducer: syncProducer,
	}
}
Example 25
// NewKafkaPublisher will initiate a new experimental Kafka publisher.
func NewKafkaPublisher(cfg *config.Kafka) (*KafkaPublisher, error) {
	var err error
	p := &KafkaPublisher{}

	if len(cfg.Topic) == 0 {
		return p, errors.New("topic name is required")
	}
	p.topic = cfg.Topic

	sconfig := sarama.NewConfig()
	sconfig.Producer.Retry.Max = cfg.MaxRetry
	sconfig.Producer.RequiredAcks = KafkaRequiredAcks
	p.producer, err = sarama.NewSyncProducer(cfg.BrokerHosts, sconfig)
	return p, err
}
Example 26
func GetProducer(kafkaHost string) sarama.SyncProducer {
	config := sarama.NewConfig()

	brokers := []string{kafkaHost}

	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Flush.Frequency = 500 * time.Millisecond

	producer, err := sarama.NewSyncProducer(brokers, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	return producer
}
Example 27
func PublishKafkav1(input chan []*FileEvent,
	registrar chan []*FileEvent,
	config *NetworkConfig) {
	var err error
	var sequence uint32
	pconfig := sarama.NewConfig()
	pconfig.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
	pconfig.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
	producer, err := sarama.NewSyncProducer(config.Servers, pconfig)
	if err != nil {
		emit("Failed to start Sarama producer:", err)
		return
	}

	for events := range input {

		for _, event := range events {
			sequence += 1
			//send here
			oops := func(err error) {
				// TODO(sissel): Track how frequently we timeout and reconnect. If we're
				// timing out too frequently, there's really no point in timing out since
				// basically everything is slow or down. We'll want to ratchet up the
				// timeout value slowly until things improve, then ratchet it down once
				// things seem healthy.
				emit("Socket error, will reconnect: %s\n", err)
				time.Sleep(1 * time.Second)
			}
			for {
				partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
					Topic: config.Topic,
					Value: sarama.StringEncoder(*event.Text),
				})
				if err != nil {
					oops(err)
					continue
				}
				emit("Data sent to partition/offset: /%d/%d", partition, offset)
				break
			}
		}

		// Tell the registrar that we've successfully sent these events
		registrar <- events
	}
}
Example 28
func main() {
	flag.Parse()

	if *verbose {
		sarama.Logger = logger
	}

	var partitionerConstructor sarama.PartitionerConstructor
	switch *partitioner {
	case "hash":
		partitionerConstructor = sarama.NewHashPartitioner
	case "random":
		partitionerConstructor = sarama.NewRandomPartitioner
	default:
		log.Fatalf("Partitioner %s not supported.", *partitioner)
	}

	var keyEncoder, valueEncoder sarama.Encoder
	if *key != "" {
		keyEncoder = sarama.StringEncoder(*key)
	}
	if *value != "" {
		valueEncoder = sarama.StringEncoder(*value)
	}

	config := sarama.NewConfig()
	config.Producer.Partitioner = partitionerConstructor

	producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
	if err != nil {
		logger.Fatalln("FAILED to open the producer:", err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: *topic,
		Key:   keyEncoder,
		Value: valueEncoder,
	})

	if err != nil {
		logger.Println("FAILED to produce message:", err)
	} else {
		fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
	}
}
Example 29
func newProducer(brokerList []string, cert *string, key *string, ca *string, verify bool) sarama.SyncProducer {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
	tlsConfig := createTLSConfig(cert, key, ca, verify)
	if tlsConfig != nil {
		config.Net.TLS.Config = tlsConfig
		config.Net.TLS.Enable = true
	}

	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		log.Fatalf("Failed to start Sarama producer: %s", err.Error())
	}

	return producer
}
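createTLSConfig is not included in the snippet. A minimal sketch built on the standard crypto/tls, crypto/x509 and io/ioutil packages, assuming cert, key and ca are file paths and that verify toggles certificate verification, could look like this:

// Illustrative sketch of the TLS helper assumed above.
func createTLSConfig(cert, key, ca *string, verify bool) *tls.Config {
	if *cert == "" || *key == "" {
		return nil
	}
	pair, err := tls.LoadX509KeyPair(*cert, *key)
	if err != nil {
		log.Fatalf("Failed to load client certificate/key: %s", err)
	}
	pool := x509.NewCertPool()
	if *ca != "" {
		caBytes, err := ioutil.ReadFile(*ca)
		if err != nil {
			log.Fatalf("Failed to read CA file: %s", err)
		}
		pool.AppendCertsFromPEM(caBytes)
	}
	return &tls.Config{
		Certificates:       []tls.Certificate{pair},
		RootCAs:            pool,
		InsecureSkipVerify: !verify,
	}
}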
Example 30
File: kafka.go Project: ovh/tat
func initKafka() {
	if viper.GetString("kafka_client_id") == "" || viper.GetString("kafka_broker_addresses") == "" {
		log.Infof("No Kafka configured")
		return
	}
	c := sarama.NewConfig()
	c.ClientID = viper.GetString("kafka_client_id")

	var err error
	producer, err = sarama.NewSyncProducer(strings.Split(viper.GetString("kafka_broker_addresses"), ","), c)
	if err != nil {
		log.Errorf("Error with init sarama:%s (newSyncProducer)", err.Error())
	} else {
		hookKafkaEnabled = true
	}
	log.Infof("Kafka used at %s", viper.GetString("kafka_broker_addresses"))
}