Example #1
func (k *Kafka) Write(points []*client.Point) error {
	if len(points) == 0 {
		return nil
	}

	for _, p := range points {
		// Combine tags from Point and BatchPoints and grab the resulting
		// line-protocol output string to write to Kafka
		value := p.String()

		m := &sarama.ProducerMessage{
			Topic: k.Topic,
			Value: sarama.StringEncoder(value),
		}
		if h, ok := p.Tags()[k.RoutingTag]; ok {
			m.Key = sarama.StringEncoder(h)
		}

		_, _, err := k.producer.SendMessage(m)
		if err != nil {
			return fmt.Errorf("FAILED to send kafka message: %s", err)
		}
	}
	return nil
}
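For context, a hedged sketch of the receiver this Write method implies; the field set is inferred from usage above and is not shown in the example:

// Assumed shape of the Kafka output, inferred from the Write method.
type Kafka struct {
	Topic      string              // destination topic for all points
	RoutingTag string              // tag whose value, if present, becomes the message key
	producer   sarama.SyncProducer // SendMessage implies a synchronous producer
}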
Example #2
func forwarding(pl ...ProcessLogic) ProcessLogic {
	return eventProcessor(func(n fsmonitor.Notice) {

		for _, p := range pl {
			p.Process(n)
		}

		// If no message key is set, messages are distributed randomly
		// over the different partitions.
		partition, offset, err := noticeSender.SendMessage(&sarama.ProducerMessage{
			Topic: *topic,
			Key:   sarama.StringEncoder(fmt.Sprintf("%v", n.Type())),
			Value: sarama.StringEncoder(n.Name()),
		})

		if err != nil {
			Logger.Printf("Failed to store your data:, %s", err)
		} else {
			// The tuple (topic, partition, offset) can be used as a unique
			// identifier for a message in a Kafka cluster.
			Logger.Printf("Your data is stored with unique identifier kafka://%s/%d/%d", *topic, partition, offset)
		}
	})
}
Example #3
func (q KafkaTopic) Enqueue(key, value string) {
	q.Producable <- &sarama.ProducerMessage{
		Topic: q.Topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.StringEncoder(value),
	}
}
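Enqueue only writes to the Producable channel; a minimal sketch of how that channel might be drained into an async producer (the wiring below is assumed, not part of the example):

// Hypothetical pump goroutine: forwards queued messages to sarama.
func (q KafkaTopic) pump(producer sarama.AsyncProducer) {
	for msg := range q.Producable {
		producer.Input() <- msg
	}
}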
Example #4
func (k *Kafka) Write(metrics []telegraf.Metric) error {
	if len(metrics) == 0 {
		return nil
	}

	for _, metric := range metrics {
		values, err := k.serializer.Serialize(metric)
		if err != nil {
			return err
		}

		for _, value := range values {
			m := &sarama.ProducerMessage{
				Topic: k.Topic,
				Value: sarama.StringEncoder(value),
			}
			if h, ok := metric.Tags()[k.RoutingTag]; ok {
				m.Key = sarama.StringEncoder(h)
			}

			if _, _, pubErr := k.producer.SendMessage(m); pubErr != nil {
				return fmt.Errorf("FAILED to send kafka message: %s", pubErr)
			}
		}
	}
	return nil
}
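The serializer contract is not shown; the loop over values implies that one metric may serialize into several wire-format strings, roughly (assumed, inferred from usage):

// Assumed serializer interface, inferred from the loop above.
type Serializer interface {
	Serialize(metric telegraf.Metric) ([]string, error)
}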
Example #5
func Publish(input chan *FileEvent, source string, ctrl chan bool) {
	clientConfig := sarama.NewConfig()
	clientConfig.Producer.RequiredAcks = sarama.WaitForLocal
	clientConfig.Producer.Compression = sarama.CompressionSnappy
	clientConfig.Producer.Flush.Frequency = 500 * time.Millisecond
	clientConfig.Producer.Flush.Messages = 200
	clientConfig.Producer.Flush.MaxMessages = 200
	clientConfig.Producer.Flush.Bytes = 16384
	clientConfig.Producer.Return.Successes = true
	clientConfig.Producer.Partitioner = sarama.NewRoundRobinPartitioner
	clientConfig.ChannelBufferSize = kafkabuffer

	//brokerList := []string{"127.0.0.1:9092"}
	var producer sarama.AsyncProducer
	var err error
	for {
		producer, err = sarama.NewAsyncProducer(brokerList, clientConfig)
		if err != nil {
			log.Error("Publish: Failed to start Sarama producer: ", err)
			log.Info("waiting....")
			time.Sleep(1 * time.Second)
		} else {
			break
		}
	}

	defer func() {
		if err := producer.Close(); err != nil {
			log.Error("Failed to shutdown producer cleanly", err)
		}
	}()

	registrar := &Registrar{source: source, publishCtrl: ctrl}
	go registrar.RegistrarDo(producer.Errors(), producer.Successes())

	topic := kafkaTopic
	baseName := filepath.Base(source)
	if len(topicmap) > 0 {
		tmpTopic := genTopic(baseName, topicmap)
		if tmpTopic != "" {
			topic = tmpTopic
		}
	}

	for event := range input {
		log.Debugf("%v, %v, %v, %v", *event.Source, *event.Text, event.Line, event.Offset)
		key := strconv.FormatInt(event.Offset, 10)
		producer.Input() <- &sarama.ProducerMessage{
			Topic:    topic,
			Key:      sarama.StringEncoder(key),
			Value:    sarama.StringEncoder(*event.Text),
			Metadata: event,
		}
	}

}
Example #6
func main() {

	// Setup configuration
	config := sarama.NewConfig()
	// Return specifies which channels will be populated; if they are set to
	// true, you must read from them to avoid deadlock. Only Errors is read
	// below, so Successes is left at its default (false).
	// The total number of times to retry sending a message (default 3).
	config.Producer.Retry.Max = 5
	// The level of acknowledgement reliability needed from the broker.
	config.Producer.RequiredAcks = sarama.WaitForAll
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewAsyncProducer(brokers, config)
	if err != nil {
		// Should not reach here
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			// Should not reach here
			panic(err)
		}
	}()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var enqueued, errors int
	doneCh := make(chan struct{})
	go func() {
		for {

			time.Sleep(500 * time.Millisecond)

			strTime := strconv.Itoa(int(time.Now().Unix()))
			msg := &sarama.ProducerMessage{
				Topic: "important",
				Key:   sarama.StringEncoder(strTime),
				Value: sarama.StringEncoder("Something Cool"),
			}
			select {
			case producer.Input() <- msg:
				enqueued++
				fmt.Println("Produce message")
			case err := <-producer.Errors():
				errors++
				fmt.Println("Failed to produce message:", err)
			case <-signals:
				doneCh <- struct{}{}
				return
			}
		}
	}()

	<-doneCh
	log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}
Example #7
func (suite *KafkaTester) Test01() {
	t := suite.T()
	assert := assert.New(t)

	const M1 = "message one"
	const M2 = "message two"

	var producer sarama.AsyncProducer
	var consumer sarama.Consumer
	var partitionConsumer sarama.PartitionConsumer

	var err error

	topic := makeTopicName()

	{
		config := sarama.NewConfig()
		config.Producer.Return.Successes = false
		config.Producer.Return.Errors = false

		producer, err = sarama.NewAsyncProducer([]string{suite.server}, config)
		assert.NoError(err)
		defer close(t, producer)

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M1)}

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M2)}
	}

	{
		consumer, err = sarama.NewConsumer([]string{suite.server}, nil)
		assert.NoError(err)
		defer close(t, consumer)

		partitionConsumer, err = consumer.ConsumePartition(topic, 0, 0)
		assert.NoError(err)
		defer close(t, partitionConsumer)
	}

	{
		mssg1 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d  value:%v", mssg1.Offset, string(mssg1.Value))
		mssg2 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d  value:%v", mssg2.Offset, string(mssg2.Value))

		assert.EqualValues(M1, string(mssg1.Value))
		assert.EqualValues(M2, string(mssg2.Value))
	}
}
Example #8
func main() {

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewAsyncProducer(brokers, config)

	if err != nil {
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			panic(err)
		}
	}()

	deviceIds := [1]string{"28-00000626aa4d"}

	for _, deviceId := range deviceIds {
		deviceId := deviceId // copy for the goroutine closure

		go func() {
			for {
				temperatureValue := getTemperatureValue(deviceId)
				fmt.Println(temperatureValue)

				msg := &sarama.ProducerMessage{
					Topic: "important",
					Key:   sarama.StringEncoder(deviceId),
					Value: sarama.StringEncoder(strconv.FormatFloat(temperatureValue, 'E', -1, 64)),
				}

				select {
				case producer.Input() <- msg:
					fmt.Println("Produce message")
				case err := <-producer.Errors():
					fmt.Println("Failed to produce message:", err)
				}

				time.Sleep(5 * time.Second)
			}
		}()
	}

	select {}
}
Example #9
func main() {
	configFile := flag.String("c", "", "Config file")
	messageValue := flag.String("m", "", "Message")
	amount := flag.Int("a", 1, "Amount of messages")
	flag.Parse()

	if *configFile == "" || *messageValue == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	options, err := revolver.LoadOptions(*configFile)
	if err != nil {
		log.Fatalln(err)
	}

	sarama.Logger = logger

	keyEncoder := sarama.StringEncoder(time.Now().String())
	valueEncoder := sarama.StringEncoder(*messageValue) // guaranteed non-empty above

	config := sarama.NewConfig()
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	producer, err := sarama.NewSyncProducer(options.Brokers, config)
	if err != nil {
		logger.Fatalln("FAILED to open the producer:", err)
	}
	defer producer.Close()
	topic := options.KafkaTopics[0]

	for i := 0; i < *amount; i++ {
		partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Key:   keyEncoder,
			Value: valueEncoder,
		})

		if err != nil {
			logger.Println("FAILED to produce message:", err)

		} else {
			logger.Printf("msg: %d, topic=%s\tpartition=%d\toffset=%d\n", i, topic, partition, offset)
		}
	}
}
Example #10
func pubKafkaLoop(seq int) {
	cf := sarama.NewConfig()
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Partitioner = sarama.NewHashPartitioner
	cf.Producer.Timeout = time.Second
	//cf.Producer.Compression = sarama.CompressionSnappy
	cf.Producer.Retry.Max = 3
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cf)
	if err != nil {
		stress.IncCounter("fail", 1)
		log.Println(err)
		return
	}

	defer producer.Close()
	msg := strings.Repeat("X", sz)
	for i := 0; i < loops; i++ {
		_, _, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.StringEncoder(msg),
		})
		if err == nil {
			stress.IncCounter("ok", 1)
		} else {
			stress.IncCounter("fail", 1)
		}
	}

}
Example #11
func main() {
	flag.Parse()

	if *brokers == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	brokerList := strings.Split(*brokers, ",")
	producer := newAsyncProducer(brokerList)
	// Close flushes buffered messages before the program exits.
	defer producer.Close()

	f, err := os.Open(*filename)
	if err != nil {
		log.Fatal("Could not open raw file:", err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	scanner.Split(bufio.ScanLines)

	for scanner.Scan() {
		producer.Input() <- &sarama.ProducerMessage{
			Topic: *topic,
			Value: sarama.StringEncoder(scanner.Text()),
		}
	}
}
Example #12
func (tp *TypedProducer) produce(cmData *CmData) {
	// logger.Debug("produce requiredAcks=%d", int(tp.requiredAcks))
	// fetch and fill
	pmpe := tp.pmp.fetch()
	pmpe.privData = cmData
	pmsg := pmpe.pmsg
	pmsg.Topic = cmData.topic
	if len(cmData.key) == 0 {
		// if the key is empty, sarama's RandomPartitioner is used
		pmsg.Key = nil
	} else {
		pmsg.Key = sarama.StringEncoder(cmData.key)
	}
	pmsg.Value = sarama.ByteEncoder(cmData.data)
	pmsg.Metadata = pmpe
	// do produce
	for {
		select {
		case tp.ap.Input() <- pmsg:
			return
		case perr := <-tp.ap.Errors():
			tp.processProduceErrors(perr)
		}
	}
}
Example #13
func (o *KafkaOutput) Write(data []byte) (n int, err error) {
	headers := make(map[string]string)
	proto.ParseHeaders([][]byte{data}, func(header []byte, value []byte) bool {
		headers[string(header)] = string(value)
		return true
	})

	req := payloadBody(data)

	kafkaMessage := KafkaMessage{
		ReqURL:     string(proto.Path(req)),
		ReqMethod:  string(proto.Method(req)),
		ReqBody:    string(proto.Body(req)),
		ReqHeaders: headers,
	}
	jsonMessage, err := json.Marshal(&kafkaMessage)
	if err != nil {
		return 0, err
	}
	message := sarama.StringEncoder(jsonMessage)

	o.producer.Input() <- &sarama.ProducerMessage{
		Topic: o.config.topic,
		Value: message,
	}

	return len(message), nil
}
Example #14
func broadcastKafka(msg []byte) (err error) {
	message := &sarama.ProducerMessage{Topic: KafkaPushsTopic, Key: sarama.StringEncoder(define.KAFKA_MESSAGE_BROADCAST), Value: sarama.ByteEncoder(msg)}
	if _, _, err = producer.SendMessage(message); err != nil {
		return
	}
	return
}
Example #15
func broadcastRoomKafka(ridStr string, msg []byte) (err error) {
	message := &sarama.ProducerMessage{Topic: KafkaPushsTopic, Key: sarama.StringEncoder(ridStr), Value: sarama.ByteEncoder(msg)}
	if _, _, err = producer.SendMessage(message); err != nil {
		return
	}
	return
}
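Both broadcast helpers assume a package-level SyncProducer; a minimal initialization sketch (names and settings assumed, not shown in the examples):

// Hypothetical setup for the package-level producer used above.
var producer sarama.SyncProducer

func initProducer(addrs []string) (err error) {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true // required by sarama's SyncProducer
	producer, err = sarama.NewSyncProducer(addrs, config)
	return
}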
Example #16
func work() error {
	config, err := initConfig()
	if err != nil {
		return err
	}

	client, producer, err := gomkafka.Gomkafka(config)
	if err != nil {
		panic(err)
	}
	defer client.Close()
	defer producer.Close()

	in := bufio.NewReader(os.Stdin)

	for {
		msg, err := in.ReadString('\n')
		if err != nil {
			return err
		}

		err = producer.SendMessage("monitoring", nil, kafka.StringEncoder(msg))
		if err != nil {
			return err
		}

		time.Sleep(1 * time.Millisecond)
	}
}
Example #17
func main() {
	flag.Parse()

	if *brokers == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	brokerList := strings.Split(*brokers, ",")
	producer := newSyncProducer(brokerList)

	resp, err := http.Get("http://developer.usa.gov/1usagov")
	if err != nil {
		log.Fatal("Failed to open stream:", err)
	}
	defer resp.Body.Close()
	reader := bufio.NewReader(resp.Body)

	for {
		line, err := reader.ReadBytes('\n')
		if err != nil {
			log.Println("Failed to ReadBytes:", err)
			break
		}

		if isJSON(line) {
			if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
				Topic: *topic,
				Value: sarama.StringEncoder(line),
			}); err != nil {
				log.Println("Failed to send message:", err)
			}
		}
	}
}
Example #18
func main() {
	flag.Parse()
	if *host == "" || *topic == "" || *logfile == "" {
		fmt.Printf("pararm error,host=%s,topic=%s,logfile=%s\n", *host, *topic, *logfile)
		os.Exit(0)
	}

	hosts := strings.Split(*host, ",")
	producer, err := sarama.NewSyncProducer(hosts, nil)
	if err != nil {
		fmt.Printf("create kafka syncproducer fail. %+v\n", err)
		os.Exit(-1)
	}
	defer producer.Close()

	file, err1 := os.Open(*logfile)
	if err1 != nil {
		fmt.Printf("open logfile %s fail. %+v\n", *logfile, err1)
		os.Exit(-2)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		msg := &sarama.ProducerMessage{Topic: *topic, Value: sarama.StringEncoder(scanner.Text())}
		_, _, err := producer.SendMessage(msg)
		if err != nil {
			fmt.Printf("FAILED to send message: %s\n", err)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
Example #19
func (c *KafkaClient) PutTopicMsg(topic, text string) {
	msg := &sarama.ProducerMessage{Topic: topic, Value: sarama.StringEncoder(text)}
	_, _, err := c.Producer.SendMessage(msg)
	if err != nil {
		log.Printf("FAILED to send message: %s\n", err)
	}
}
Example #20
func newProducerMessage(cp ChainPartition, payload []byte) *sarama.ProducerMessage {
	return &sarama.ProducerMessage{
		Topic: cp.Topic(),
		Key:   sarama.StringEncoder(strconv.Itoa(int(cp.Partition()))), // TODO Consider writing an IntEncoder?
		Value: sarama.ByteEncoder(payload),
	}
}
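The TODO mentions an IntEncoder; a minimal sketch of one, assuming only sarama's Encoder interface (Encode and Length):

// IntEncoder is a hypothetical sarama.Encoder that would avoid the
// strconv.Itoa/StringEncoder round trip at the call site.
type IntEncoder int

func (e IntEncoder) Encode() ([]byte, error) {
	return []byte(strconv.Itoa(int(e))), nil
}

func (e IntEncoder) Length() int {
	return len(strconv.Itoa(int(e)))
}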
Example #21
func generateKafkaData(t *testing.T, topic string) {
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{getTestKafkaHost()}, config)
	if err != nil {
		t.Errorf("%s", err)
	}

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Hello World"),
	}

	_, _, err = producer.SendMessage(msg)
	if err != nil {
		t.Errorf("FAILED to send message: %s\n", err)
	}

	client.RefreshMetadata(topic)
}
Example #22
func (s *Server) withAccessLog(next http.Handler) http.Handler {

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		started := time.Now()

		next.ServeHTTP(w, r)

		entry := &accessLogEntry{
			Method:       r.Method,
			Host:         r.Host,
			Path:         r.RequestURI,
			IP:           r.RemoteAddr,
			ResponseTime: float64(time.Since(started)) / float64(time.Second),
		}

		// We will use the client's IP address as key. This will cause
		// all the access log entries of the same IP address to end up
		// on the same partition.
		s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
			Topic: "access_log",
			Key:   sarama.StringEncoder(r.RemoteAddr),
			Value: entry,
		}
	})
}
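Here entry is passed directly as the message Value, so accessLogEntry must itself implement sarama.Encoder; a sketch of one way to do that (the unexported encoded and err fields are assumed additions to the struct):

// Hypothetical Encoder implementation: marshal once, reuse for both calls.
func (e *accessLogEntry) ensureEncoded() {
	if e.encoded == nil && e.err == nil {
		e.encoded, e.err = json.Marshal(e)
	}
}

func (e *accessLogEntry) Length() int {
	e.ensureEncoded()
	return len(e.encoded)
}

func (e *accessLogEntry) Encode() ([]byte, error) {
	e.ensureEncoded()
	return e.encoded, e.err
}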
Example #23
func main() {

	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Retry.Max = 5

	// brokers := []string{"192.168.59.103:9092"}
	brokers := []string{"localhost:9092"}
	producer, err := sarama.NewSyncProducer(brokers, config)
	if err != nil {
		// Should not reach here
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			// Should not reach here
			panic(err)
		}
	}()

	topic := "important"
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Something Cool"),
	}

	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		panic(err)
	}

	fmt.Printf("Message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset)
}
Example #24
func (i *IndeedKafkaProducer) SendMessages(jobResultChannel <-chan mapping.JobResult) (<-chan error, <-chan int) {
	errorChannel := make(chan error)
	kafkaDoneChannel := make(chan int)
	go func() {
		defer close(errorChannel)
		defer close(kafkaDoneChannel)
		defer i.Close()
		for jobResult := range jobResultChannel {
			if jobResult.IsLast() {
				eatonevents.Debug("received last jobResult. returning from function and signaling that the job is complete.")
				kafkaDoneChannel <- 0
				return
			}
			bytes, err := xml.Marshal(jobResult)
			if err != nil {
				errorChannel <- err
				continue
			}

			eatonevents.Debug(fmt.Sprintf("Sending JobResult JobKey: %s", jobResult.JobKey))
			i.producer.Input() <- &sarama.ProducerMessage{
				Topic: eatonconfig.KafkaTopic,
				Value: sarama.ByteEncoder(bytes),
				Key:   sarama.StringEncoder(jobResult.JobKey),
			}
		}
	}()
	return errorChannel, kafkaDoneChannel
}
Example #25
func TestReadsMetricsFromKafka(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	brokerPeers := []string{testutil.GetLocalHost() + ":9092"}
	zkPeers := []string{testutil.GetLocalHost() + ":2181"}
	testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix())

	// Send a Kafka message to the kafka host
	msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257"
	producer, err := sarama.NewSyncProducer(brokerPeers, nil)
	require.NoError(t, err)
	_, _, err = producer.SendMessage(
		&sarama.ProducerMessage{
			Topic: testTopic,
			Value: sarama.StringEncoder(msg),
		})
	require.NoError(t, err)
	defer producer.Close()

	// Start the Kafka Consumer
	k := &Kafka{
		ConsumerGroup:  "telegraf_test_consumers",
		Topics:         []string{testTopic},
		ZookeeperPeers: zkPeers,
		PointBuffer:    100000,
		Offset:         "oldest",
	}
	if err := k.Start(); err != nil {
		t.Fatal(err.Error())
	} else {
		defer k.Stop()
	}

	waitForPoint(k, t)

	// Verify that we can now gather the sent message
	var acc testutil.Accumulator
	// Sanity check
	assert.Equal(t, 0, len(acc.Points), "There should not be any points")

	// Gather points
	err = k.Gather(&acc)
	require.NoError(t, err)
	if len(acc.Points) == 1 {
		point := acc.Points[0]
		assert.Equal(t, "cpu_load_short", point.Measurement)
		assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields)
		assert.Equal(t, map[string]string{
			"host":      "server01",
			"direction": "in",
			"region":    "us-west",
		}, point.Tags)
		assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix())
	} else {
		t.Errorf("No points found in accumulator, expected 1")
	}
}
Example #26
func (p *KafkaSender) Push(data map[string]interface{}) error {
	val := util.JsonString(data)
	_, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: p.topic,
		Value: sarama.StringEncoder(val),
	})
	return err
}
Example #27
// SendStringMessage sends a string message to kafka
func (k *Kafka) SendStringMessage(msg string, eventType string) (partition int32, offset int64, err error) {
	if _, ok := k.topic[eventType]; !ok {
		eventType = "default"
	}
	message := &sarama.ProducerMessage{Topic: k.topic[eventType], Partition: k.partition}
	message.Value = sarama.StringEncoder(msg)
	return k.producer.SendMessage(message)
}
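Note that ProducerMessage.Partition is only honored as an input when the producer uses sarama's manual partitioner; a one-line sketch of that configuration (assumed, not shown here):

config := sarama.NewConfig()
config.Producer.Partitioner = sarama.NewManualPartitioner // honor message.Partition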
Example #28
func main() {
	flag.Parse()

	if *verbose {
		sarama.Logger = logger
	}

	var partitionerConstructor sarama.PartitionerConstructor
	switch *partitioner {
	case "hash":
		partitionerConstructor = sarama.NewHashPartitioner
	case "random":
		partitionerConstructor = sarama.NewRandomPartitioner
	default:
		log.Fatalf("Partitioner %s not supported.", *partitioner)
	}

	var keyEncoder, valueEncoder sarama.Encoder
	if *key != "" {
		keyEncoder = sarama.StringEncoder(*key)
	}
	if *value != "" {
		valueEncoder = sarama.StringEncoder(*value)
	}

	config := sarama.NewConfig()
	config.Producer.Partitioner = partitionerConstructor

	producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
	if err != nil {
		logger.Fatalln("FAILED to open the producer:", err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: *topic,
		Key:   keyEncoder,
		Value: valueEncoder,
	})

	if err != nil {
		logger.Println("FAILED to produce message:", err)
	} else {
		fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
	}
}
Example #29
func (k *Kafka) Write(bp client.BatchPoints) error {
	if len(bp.Points) == 0 {
		return nil
	}

	var zeroTime time.Time
	for _, p := range bp.Points {
		// Combine tags from Point and BatchPoints and grab the resulting
		// line-protocol output string to write to Kafka
		var value string
		if p.Raw != "" {
			value = p.Raw
		} else {
			for k, v := range bp.Tags {
				if p.Tags == nil {
					p.Tags = make(map[string]string, len(bp.Tags))
				}
				p.Tags[k] = v
			}
			if p.Time == zeroTime {
				if bp.Time == zeroTime {
					p.Time = time.Now()
				} else {
					p.Time = bp.Time
				}
			}
			value = p.MarshalString()
		}

		m := &sarama.ProducerMessage{
			Topic: k.Topic,
			Value: sarama.StringEncoder(value),
		}
		if h, ok := p.Tags[k.RoutingTag]; ok {
			m.Key = sarama.StringEncoder(h)
		}

		_, _, err := k.producer.SendMessage(m)
		if err != nil {
			return fmt.Errorf("FAILED to send kafka message: %s", err)
		}
	}
	return nil
}
Example #30
func TestPartitioner(t *testing.T) {
	key := "aaa"
	var partitionNums int32 = 3
	p := NewKafkaDefaultPartitioner("")
	m := &sarama.ProducerMessage{Key: sarama.StringEncoder(key)}
	pn, err := p.Partition(m, partitionNums)
	assert.Equal(t, nil, err)
	assert.Equal(t, c.JString(key).HashCode()%partitionNums, pn)
}