Example #1
func main() {
	var host = flag.String("kafka", "127.0.0.1:9092", "IP address:port of kafka")
	flag.Parse()
	duration := 10 * time.Millisecond
	src := make(chan uint32)
	dst := make(chan uint32)
	notify := make(chan os.Signal, 1)
	signal.Notify(notify, os.Interrupt) // os.Kill (SIGKILL) cannot be trapped, so only catch interrupt

	config := kafka.NewConfig()
	config.Producer.Return.Successes = true
	k_producer, err := kafka.NewAsyncProducer([]string{*host}, config)
	if err != nil {
		panic(err)
	}
	fmt.Println("src_ip,dst_ip,src_coord,dst_coord,received_at")

	//dc_ips are data center IPs
	dc_ips := []uint32{1222977025, 2212761857, 2169380865}

	go producer(src, dc_ips, duration)
	go producer(dst, dc_ips, duration)
	go consumer(src, dst, k_producer)

	go func(producer kafka.AsyncProducer) {
		for {
			<-producer.Successes()
		}
	}(k_producer)

	s := <-notify
	fmt.Println("signal:", s)
	fmt.Println("done.")

}
Example #2
func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {

	// For the access log, we are looking for AP semantics, with high throughput.
	// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
	config := sarama.NewConfig()
	tlsConfig := createTlsConfiguration()
	if tlsConfig != nil {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}
	config.Producer.RequiredAcks = sarama.WaitForLocal       // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy   // Compress messages
	config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms

	producer, err := sarama.NewAsyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	// We will just log to STDOUT if we're not able to produce messages.
	// Note: messages will only be returned here after all retry attempts are exhausted.
	go func() {
		for err := range producer.Errors() {
			log.Println("Failed to write access log entry:", err)
		}
	}()

	return producer
}
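The function above only constructs the producer; it never sends anything itself. A minimal usage sketch is shown below (the helper name, topic name, and string payload are assumptions for illustration, not part of the original):

// Hypothetical helper: enqueue one access-log entry on the producer returned
// by newAccessLogProducer. The topic name is assumed.
func logAccessEntry(producer sarama.AsyncProducer, entry string) {
	producer.Input() <- &sarama.ProducerMessage{
		Topic: "access_log",
		Value: sarama.StringEncoder(entry),
	}
}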
Example #3
func newStorage(machineName string) (storage.StorageDriver, error) {
	config := kafka.NewConfig()

	tlsConfig, err := generateTLSConfig()
	if err != nil {
		return nil, err
	}

	if tlsConfig != nil {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}

	config.Producer.RequiredAcks = kafka.WaitForAll

	brokerList := strings.Split(*brokers, ",")
	glog.V(4).Infof("Kafka brokers: %q", brokerList)

	producer, err := kafka.NewAsyncProducer(brokerList, config)
	if err != nil {
		return nil, err
	}
	ret := &kafkaStorage{
		producer:    producer,
		topic:       *topic,
		machineName: machineName,
	}
	return ret, nil
}
Example #4
func main() {
	producer, err := sarama.NewAsyncProducer([]string{"10.3.10.32:9091"}, nil)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err = producer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var enqueued, errors int
ProducerLoop:
	for {
		select {
		case producer.Input() <- &sarama.ProducerMessage{Topic: "dataman_test", Key: nil, Value: sarama.StringEncoder("testing 123")}:
			enqueued++
		case err = <-producer.Errors():
			log.Println("Failed to produce message", err)
			errors++
		case <-signals:
			break ProducerLoop
		}
	}

	log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
Example #5
func newAsyncProducer(tlsConfig *tls.Config, brokerList []string) sarama.AsyncProducer {
	config := sarama.NewConfig()

	// Apply the TLS configuration that was passed in (it was previously ignored).
	if tlsConfig != nil {
		config.Net.TLS.Enable = true
		config.Net.TLS.Config = tlsConfig
	}

	config.Producer.RequiredAcks = sarama.WaitForLocal       // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy   // Compress messages
	config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms

	// On the broker side, you may want to change the following settings to get
	// stronger consistency guarantees:
	// - For your broker, set `unclean.leader.election.enable` to false
	// - For the topic, you could increase `min.insync.replicas`.

	producer, err := sarama.NewAsyncProducer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama producer:", err)
	}

	// We will just log to STDOUT if we're not able to produce messages.
	// Note: messages will only be returned here after all retry attempts are exhausted.
	// this goroutine will eventually exit as producer.shutdown closes the errors channel
	go func() {
		for err := range producer.Errors() {
			log.Println("Failed to write access log entry:", err)
		}
	}()

	return producer
}
Example #6
func NewEventPublisher() (*EventPublisher, error) {
	config := sarama.NewConfig()
	config.ClientID = ipresolver.GetLocalAddr()
	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Compression = sarama.CompressionNone
	config.Producer.Return.Successes = false
	config.Producer.Return.Errors = false
	config.Producer.Partitioner = sarama.NewHashPartitioner
	asyncProducer, err := sarama.NewAsyncProducer(eatonconfig.KafkaServers, config)

	if err != nil {
		return nil, err
	}
	if config.Producer.Return.Successes {
		go func() {
			for msg := range asyncProducer.Successes() {
				log.Println("Sent Message to logs: ", msg.Key)
			}
		}()
	}
	if config.Producer.Return.Errors {
		go func() {
			for err := range asyncProducer.Errors() {
				log.Println("failed to send message to logs: ", err.Error())
			}
		}()
	}
	return &EventPublisher{
		producer: asyncProducer,
	}, nil
}
Example #7
func handler(w http.ResponseWriter, r *http.Request) {
	decoder := json.NewDecoder(r.Body)
	var request Request
	err := decoder.Decode(&request)
	if err != nil {
		log.Print("Could not decode request")
		http.Error(w, err.Error(), 500)
		return
	}

	log.Print("Received request for kind: ", request.Kind)

	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(KafkaAddresses, config)

	if err != nil {
		log.Print("Could not connect to Kafka: ", err)
		http.Error(w, err.Error(), 500)
		return
	}

	log.Print("Connected to Kafka")

	message := sarama.ProducerMessage{
		Topic: request.Kind,
		Value: MapEncoder(request.Data),
	}

	producer.Input() <- &message

	log.Print("Message sent")

	fmt.Fprintf(w, "OK")
}
Example #8
// Setup prepares the Requester for benchmarking.
func (k *kafkaRequester) Setup() error {
	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(k.urls, config)
	if err != nil {
		return err
	}

	consumer, err := sarama.NewConsumer(k.urls, nil)
	if err != nil {
		producer.Close()
		return err
	}
	partitionConsumer, err := consumer.ConsumePartition(k.topic, 0, sarama.OffsetNewest)
	if err != nil {
		producer.Close()
		consumer.Close()
		return err
	}

	k.producer = producer
	k.consumer = consumer
	k.partitionConsumer = partitionConsumer
	k.msg = &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.ByteEncoder(make([]byte, k.payloadSize)),
	}

	return nil
}
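Setup only wires the pieces together. A plausible per-operation step for this benchmark requester could publish the prepared message and block until it comes back on the partition consumer; the method name below is an assumption about the benchmarking framework, not code from the original:

// Hypothetical per-operation step: send the pre-built message and wait for it
// to arrive on the partition consumer.
func (k *kafkaRequester) Request() error {
	k.producer.Input() <- k.msg
	<-k.partitionConsumer.Messages()
	return nil
}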
Example #9
func initProducer(moduleConfig *Config) (*Producer, error) {
	fmt.Println("[INFO] initProducer called")
	brokerList := moduleConfig.Kafka.BrokerList
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas to ack
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Flush.Frequency = 500 * time.Millisecond

	var producer sarama.AsyncProducer
	var err error
	for currConnAttempt := 0; currConnAttempt < moduleConfig.Kafka.MaxRetry; currConnAttempt++ {
		producer, err = sarama.NewAsyncProducer(brokerList, config)
		if err == nil {
			break
		}
		fmt.Println("[INFO] Connection attempt faild (", (currConnAttempt + 1), "/", moduleConfig.Kafka.MaxRetry, ")")
		<-time.After(time.Second * 5)
	}

	if err != nil {
		fmt.Println("[ERROR] Unable to setup kafka producer", err)
		return nil, err
	}

	//You must read from the Errors() channel or the producer will deadlock.
	go func() {
		for err := range producer.Errors() {
			log.Println("[ERROR] Kadka producer Error: ", err)
		}
	}()

	fmt.Println("[INFO] kafka producer initialized successfully")
	return &Producer{producer: producer, id: CreatedProducersLength()}, nil
}
Example #10
func pubKafkaAsyncLoop(seq int) {
	cf := sarama.NewConfig()
	cf.Producer.Flush.Frequency = time.Second * 10
	cf.Producer.Flush.Messages = 1000
	cf.Producer.Flush.MaxMessages = 1000
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Partitioner = sarama.NewHashPartitioner
	cf.Producer.Timeout = time.Second
	//cf.Producer.Compression = sarama.CompressionSnappy
	cf.Producer.Retry.Max = 3
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, cf)
	if err != nil {
		stress.IncCounter("fail", 1)
		log.Println(err)
		return
	}

	defer producer.Close()
	msg := strings.Repeat("X", sz)
	for i := 0; i < loops; i++ {
		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.StringEncoder(msg),
		}
		stress.IncCounter("ok", 1)
	}

}
Example #11
func (this *Mirror) makePub(c2 *zk.ZkCluster) (sarama.AsyncProducer, error) {
	cf := sarama.NewConfig()
	cf.Metadata.RefreshFrequency = time.Minute * 10
	cf.Metadata.Retry.Max = 3
	cf.Metadata.Retry.Backoff = time.Second * 3

	cf.ChannelBufferSize = 1000

	cf.Producer.Return.Errors = true
	cf.Producer.Flush.Messages = 2000         // 2000 message in batch
	cf.Producer.Flush.Frequency = time.Second // flush interval
	cf.Producer.Flush.MaxMessages = 0         // unlimited
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Retry.Backoff = time.Second * 4
	cf.Producer.Retry.Max = 3
	cf.Net.DialTimeout = time.Second * 30
	cf.Net.WriteTimeout = time.Second * 30
	cf.Net.ReadTimeout = time.Second * 30

	switch this.Compress {
	case "gzip":
		cf.Producer.Compression = sarama.CompressionGZIP

	case "snappy":
		cf.Producer.Compression = sarama.CompressionSnappy
	}
	return sarama.NewAsyncProducer(c2.BrokerList(), cf)
}
Example #12
func NewKafkaProducer() (*IndeedKafkaProducer, error) {
	config := sarama.NewConfig()
	config.ClientID = ipresolver.GetLocalAddr()
	config.Producer.RequiredAcks = sarama.WaitForLocal
	config.Producer.Compression = sarama.CompressionNone
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Producer.Partitioner = sarama.NewHashPartitioner
	asyncProducer, err := sarama.NewAsyncProducer(eatonconfig.KafkaServers, config)
	if err != nil {
		return nil, err
	}
	go func() {
		for msg := range asyncProducer.Successes() {
			eatonevents.Info(fmt.Sprintf("Successfully sent message to topic %s with key %s", msg.Topic, msg.Key))
		}
	}()
	go func() {
		for err := range asyncProducer.Errors() {
			eatonevents.Error("Failed to send message due to error: ", err)
		}
	}()
	return &IndeedKafkaProducer{
		producer: asyncProducer,
	}, nil
}
Example #13
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	config := sarama.NewConfig()
	client, _ := sarama.NewClient([]string{"localhost:9092"}, config)

	topic := "test"
	pub, _ := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	consumer, _ := sarama.NewConsumerFromClient(client)
	sub, _ := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
Example #14
func queueInit() {

	config := sarama.NewConfig()

	config.ClientID = args.ID

	// Acks
	if args.Pub.Ack {
		config.Producer.RequiredAcks = sarama.WaitForAll
	} else {
		config.Producer.RequiredAcks = sarama.WaitForLocal
	}

	// Compress
	if args.Pub.Compress {
		config.Producer.Compression = sarama.CompressionSnappy
	} else {
		config.Producer.Compression = sarama.CompressionNone
	}

	// Flush Intervals
	if args.Pub.FlushFreq > 0 {
		config.Producer.Flush.Frequency = time.Duration(args.Pub.FlushFreq) * time.Second
	} else {
		config.Producer.Flush.Frequency = 1 * time.Second
	}

	producer, err := sarama.NewAsyncProducer(args.Pub.URI, config)
	if err != nil {
		log.Fatalln("Failed to start Kafka producer:", err)
	}

	qProducer = producer

}
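queueInit only assigns the package-level qProducer. A short sketch of how such a global might be used afterwards follows; the helper name and payload type are assumptions for illustration:

// Hypothetical publish helper built on the qProducer initialized above.
func queuePublish(topic string, value []byte) {
	qProducer.Input() <- &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(value),
	}
}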
Example #15
func main() {

	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	flag.StringVar(&kafkaBrokers, "brokers", "localhost:9092", "The kafka broker addresses")
	flag.Parse()

	brokers := strings.Split(kafkaBrokers, ",")

	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		log.Fatal("Can't create the Kafka producer: ", err)
	}
	fmt.Println("Connected to Kafka brokers", "["+kafkaBrokers+"]")

	ifaces, err := net.Interfaces()
	if err != nil {
		log.Fatal("Cannot get network interfaces")
	}
	for _, iface := range ifaces {
		addrs, _ := iface.Addrs()
		if iface.Name != "lo" && len(addrs) > 0 {
			fmt.Printf("Starting live capture on %s interface...\n", iface.Name)
			decodePackets(iface.Name, producer)
		}
	}
}
Example #16
// NewKafkaOutput creates instance of kafka producer client.
func NewKafkaOutput(address string, config *KafkaConfig) io.Writer {
	c := sarama.NewConfig()
	c.Producer.RequiredAcks = sarama.WaitForLocal
	c.Producer.Compression = sarama.CompressionSnappy
	c.Producer.Flush.Frequency = KafkaOutputFrequency * time.Millisecond

	brokerList := strings.Split(config.host, ",")

	producer, err := sarama.NewAsyncProducer(brokerList, c)
	if err != nil {
		log.Fatalln("Failed to start Sarama(Kafka) producer:", err)
	}

	o := &KafkaOutput{
		config:   config,
		producer: producer,
	}

	if Settings.verbose {
		// Start infinite loop for tracking errors for kafka producer.
		go o.ErrorHandler()
	}

	return o
}
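The ErrorHandler goroutine started above is not shown here; one plausible shape for it, assuming it simply drains and logs the producer's error channel, is:

// Hypothetical sketch of the error-tracking loop started when verbose is set.
func (o *KafkaOutput) ErrorHandler() {
	for err := range o.producer.Errors() {
		log.Println("Kafka producer error:", err)
	}
}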
Example #17
func NewPipelineKafka(host, db string) *PipelineKafka {
	brokerList := strings.Split(host, ",")
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForLocal       // Only wait for the leader to ack
	config.Producer.Compression = sarama.CompressionSnappy   // Compress messages
	config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
	producer, _ := sarama.NewAsyncProducer(brokerList, config)
	return &PipelineKafka{producer: producer, db: db}
}
Example #18
func Publish(input chan *FileEvent, source string, ctrl chan bool) {
	clientConfig := sarama.NewConfig()
	clientConfig.Producer.RequiredAcks = sarama.WaitForLocal
	clientConfig.Producer.Compression = sarama.CompressionSnappy
	clientConfig.Producer.Flush.Frequency = 500 * time.Millisecond
	clientConfig.Producer.Flush.Messages = 200
	clientConfig.Producer.Flush.MaxMessages = 200
	clientConfig.Producer.Flush.Bytes = 16384
	clientConfig.Producer.Return.Successes = true
	clientConfig.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	clientConfig.ChannelBufferSize = kafkabuffer

	//brokerList := []string{"127.0.0.1:9092"}
	var producer sarama.AsyncProducer
	var err error
	for {
		producer, err = sarama.NewAsyncProducer(brokerList, clientConfig)
		if err != nil {
			log.Error("Publish: Failed to start Sarama producer: ", err)
			log.Info("waiting....")
			time.Sleep(1 * time.Second)
		} else {
			break
		}
	}

	defer func() {
		if err := producer.Close(); err != nil {
			log.Error("Failed to shutdown producer cleanly", err)
		}
	}()

	registrar := &Registrar{source: source, publishCtrl: ctrl}
	go registrar.RegistrarDo(producer.Errors(), producer.Successes())

	topic := kafkaTopic
	baseName := filepath.Base(source)
	if len(topicmap) > 0 {
		tmpTopic := genTopic(baseName, topicmap)
		if tmpTopic != "" {
			topic = tmpTopic
		}
	}

	key := hashKey
	for event := range input {
		log.Debugf("%v, %v, %v, %v", *event.Source, *event.Text, event.Line, event.Offset)
		key = strconv.FormatInt(event.Offset, 10)
		producer.Input() <- &sarama.ProducerMessage{
			Topic:    topic,
			Key:      sarama.StringEncoder(key),
			Value:    sarama.StringEncoder(*event.Text),
			Metadata: event,
		}
	}

}
Example #19
func (b *Broker) initAP() error {
	var err error
	b.ap, err = sarama.NewAsyncProducer(b.config.brokerServerList, b.brokerConfig)
	if err != nil {
		return err
	}
	b.produceChan = make(chan *sarama.ProducerMessage, 64)
	return nil
}
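initAP creates produceChan but does not show how it feeds the producer. A hedged sketch of a pump loop follows; the method name and the logging are assumptions, not part of the original broker:

// Hypothetical pump loop: forward buffered messages to the async producer and
// log delivery failures reported on the Errors channel.
func (b *Broker) runAP() {
	go func() {
		for err := range b.ap.Errors() {
			log.Println("kafka producer error:", err)
		}
	}()
	for msg := range b.produceChan {
		b.ap.Input() <- msg
	}
}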
Example #20
func main() {

	// Setup configuration
	config := sarama.NewConfig()
	// Return specifies which channels will be populated. If they are set to
	// true (e.g. config.Producer.Return.Successes = true), you must read from
	// the corresponding channel to avoid blocking.
	// The total number of times to retry sending a message (default 3).
	config.Producer.Retry.Max = 5
	// The level of acknowledgement reliability needed from the broker.
	config.Producer.RequiredAcks = sarama.WaitForAll
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		// Should not reach here
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			// Should not reach here
			panic(err)
		}
	}()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var enqueued, errors int
	doneCh := make(chan struct{})
	go func() {
		for {

			time.Sleep(500 * time.Millisecond)

			strTime := strconv.Itoa(int(time.Now().Unix()))
			msg := &sarama.ProducerMessage{
				Topic: "important",
				Key:   sarama.StringEncoder(strTime),
				Value: sarama.StringEncoder("Something Cool"),
			}
			select {
			case producer.Input() <- msg:
				enqueued++
				fmt.Println("Produce message")
			case err := <-producer.Errors():
				errors++
				fmt.Println("Failed to produce message:", err)
			case <-signals:
				doneCh <- struct{}{}
			}
		}
	}()

	<-doneCh
	log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
Example #21
func main() {

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewAsyncProducer(brokers, config)

	if err != nil {
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			panic(err)
		}
	}()

	deviceIds := [1]string{"28-00000626aa4d"}

	for i := 0; i < len(deviceIds); i++ {

		deviceId := deviceIds[i]

		go func() {

			for {

				temperatureValue := getTemperatureValue(deviceId)

				fmt.Println(temperatureValue)

				msg := &sarama.ProducerMessage{
					Topic: "important",
					Key:   sarama.StringEncoder(deviceId),
					Value: sarama.StringEncoder(strconv.FormatFloat(temperatureValue, 'E', -1, 64)),
				}

				select {
				case producer.Input() <- msg:
					fmt.Println("Produce message")
				case err := <-producer.Errors():
					fmt.Println("Failed to produce message:", err)
				}

				time.Sleep(5 * time.Second)

			}

		}()

	}

	select {}

}
Example #22
func (suite *KafkaTester) Test01() {
	t := suite.T()
	assert := assert.New(t)

	const M1 = "message one"
	const M2 = "message two"

	var producer sarama.AsyncProducer
	var consumer sarama.Consumer
	var partitionConsumer sarama.PartitionConsumer

	var err error

	topic := makeTopicName()

	{
		config := sarama.NewConfig()
		config.Producer.Return.Successes = false
		config.Producer.Return.Errors = false

		producer, err = sarama.NewAsyncProducer([]string{suite.server}, config)
		assert.NoError(err)
		defer close(t, producer)

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M1)}

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M2)}
	}

	{
		consumer, err = sarama.NewConsumer([]string{suite.server}, nil)
		assert.NoError(err)
		defer close(t, consumer)

		partitionConsumer, err = consumer.ConsumePartition(topic, 0, 0)
		assert.NoError(err)
		defer close(t, partitionConsumer)
	}

	{
		mssg1 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d  value:%v", mssg1.Offset, string(mssg1.Value))
		mssg2 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d  value:%v", mssg2.Offset, string(mssg2.Value))

		assert.EqualValues(M1, string(mssg1.Value))
		assert.EqualValues(M2, string(mssg2.Value))
	}
}
Example #23
func InitKafka(kafkaAddrs []string) (err error) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.NoResponse
	config.Producer.Partitioner = sarama.NewHashPartitioner
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	producer, err = sarama.NewAsyncProducer(kafkaAddrs, config)
	if err != nil {
		return
	}
	go handleSuccess()
	go handleError()
	return
}
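Because both Return.Successes and Return.Errors are enabled here, both channels must be drained or the producer will eventually block. The handleSuccess and handleError functions are not shown; a plausible shape for them (an assumption, not the original code) is:

// Hypothetical drain loops for the package-level producer used in InitKafka.
func handleSuccess() {
	for msg := range producer.Successes() {
		log.Println("kafka: message delivered, offset:", msg.Offset)
	}
}

func handleError() {
	for err := range producer.Errors() {
		log.Println("kafka: delivery failed:", err)
	}
}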
Example #24
func newProducer() AsyncProducer {
	var producer kafka.AsyncProducer
	config := kafka.NewConfig()

	_ = retry(func() (err error) {
		producer, err = kafka.NewAsyncProducer(kafkas, config)
		return
	})

	return AsyncProducer{producer}
}
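The retry helper used above is not shown; a minimal sketch with the same shape follows (the attempt budget and delay are assumptions):

// Hypothetical retry helper: re-run fn with a fixed delay until it succeeds
// or the attempt budget is exhausted.
func retry(fn func() error) error {
	var err error
	for attempt := 0; attempt < 5; attempt++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(2 * time.Second)
	}
	return err
}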
Example #25
func (this *pubPool) asyncProducerFactory() (pool.Resource, error) {
	if len(this.brokerList) == 0 {
		return nil, store.ErrEmptyBrokers
	}

	apc := &asyncProducerClient{
		rp:      this.asyncPool,
		cluster: this.cluster,
		id:      atomic.AddUint64(&this.nextId, 1),
	}

	var err error
	t1 := time.Now()
	cf := sarama.NewConfig()
	cf.Net.DialTimeout = time.Second * 4
	cf.Net.ReadTimeout = time.Second * 4
	cf.Net.WriteTimeout = time.Second * 4

	cf.Metadata.RefreshFrequency = time.Minute * 10
	cf.Metadata.Retry.Max = 3
	cf.Metadata.Retry.Backoff = time.Millisecond * 10

	cf.Producer.Flush.Frequency = time.Second * 10 // TODO
	cf.Producer.Flush.Messages = 1000
	cf.Producer.Flush.MaxMessages = 0 // unlimited

	cf.Producer.RequiredAcks = sarama.NoResponse
	cf.Producer.Partitioner = NewExclusivePartitioner
	cf.Producer.Retry.Backoff = time.Millisecond * 10 // gk migrate will trigger this backoff
	cf.Producer.Retry.Max = 3
	if this.store.compress {
		cf.Producer.Compression = sarama.CompressionSnappy
	}

	cf.ClientID = this.store.hostname

	apc.AsyncProducer, err = sarama.NewAsyncProducer(this.brokerList, cf)
	if err != nil {
		return nil, err
	}

	log.Trace("cluster[%s] kafka async producer connected[%d]: %+v %s",
		this.cluster, apc.id, this.brokerList, time.Since(t1))

	// TODO
	go func() {
		// messages will only be returned here after all retry attempts are exhausted.
		for err := range apc.Errors() {
			log.Error("cluster[%s] kafka async producer: %v", this.cluster, err)
		}
	}()

	return apc, err
}
Example #26
// newKafkaAsyncProducer creates a Kafka async producer using github.com/Shopify/sarama.
func newKafkaAsyncProducer(brokers string) (kafka.AsyncProducer, error) {
	config := kafka.NewConfig()
	config.Producer.RequiredAcks = kafka.WaitForAll
	var brokerList = []string{brokers}
	producer, err := kafka.NewAsyncProducer(brokerList, config)

	if err != nil {
		return nil, err
	}
	return producer, err
}
Example #27
func pumpData(conf *Config, users chan models.User) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	producer, err := sarama.NewAsyncProducer(conf.Brokers, config)
	if err != nil {
		log.Fatalf("Can't create producer! Err: %v", err)
	}

	// Trap SIGINT to trigger a graceful shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var (
		wg                          sync.WaitGroup
		enqueued, successes, errors int
	)

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			log.Println(err)
			errors++
		}
	}()

ProducerLoop:
	for user := range users {
		b, _ := json.Marshal(user)
		message := &sarama.ProducerMessage{Topic: conf.Topic, Value: sarama.ByteEncoder(b)}
		select {
		case producer.Input() <- message:
			enqueued++

		case <-signals:
			// Rely on the single AsyncClose call below so the producer is
			// only shut down once.
			break ProducerLoop
		}
	}

	producer.AsyncClose()

	wg.Wait()

	log.Printf("Successfully produced: %d; errors: %d", successes, errors)
}
Example #28
func (this *Mirror) makePub(c2 *zk.ZkCluster) (sarama.AsyncProducer, error) {
	// TODO setup batch size
	cf := sarama.NewConfig()
	switch this.compress {
	case "gzip":
		cf.Producer.Compression = sarama.CompressionGZIP

	case "snappy":
		cf.Producer.Compression = sarama.CompressionSnappy
	}
	return sarama.NewAsyncProducer(c2.BrokerList(), cf)
}
Example #29
func main() {

	log.Println("Starting ... ")

	asyncProducer, err := sarama.NewAsyncProducer(kafkaAddrs, sarama.NewConfig())

	if err != nil {
		log.Fatalf("cannot create the AsyncProducer: %v", err)
	}

	asyncProducer.Input() <- &sarama.ProducerMessage{Topic: topic}

	// Close flushes the buffered message and reports any delivery error
	// before the program exits.
	if err := asyncProducer.Close(); err != nil {
		log.Fatalf("cannot shut down the AsyncProducer cleanly: %v", err)
	}
}
Example #30
// NewPeer creates and returns a new Peer for communicating with Kafka.
func NewPeer(host string) (*Peer, error) {
	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}

	producer, err := sarama.NewAsyncProducer([]string{host}, config)
	if err != nil {
		return nil, err
	}

	consumer, err := sarama.NewConsumer([]string{host}, config)
	if err != nil {
		return nil, err
	}

	partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		return nil, err
	}

	return &Peer{
		client:   client,
		producer: producer,
		consumer: partitionConsumer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}