Example #1
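A benchmark harness constructor: it shares one client between an async producer and a partition consumer on the "test" topic, and picks a latency or throughput handler depending on the testLatency flag. Note that the errors returned by the sarama constructors are discarded.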
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	config := sarama.NewConfig()
	client, _ := sarama.NewClient([]string{"localhost:9092"}, config)

	topic := "test"
	pub, _ := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	consumer, _ := sarama.NewConsumerFromClient(client)
	sub, _ := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
Example #2
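Builds a KafkaTopic wrapper around a single client: an async producer, its input channel, a consumer, and a partition consumer on "http-request" starting at offset 0, panicking if any step fails.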
func CreateKafkaTopic() *KafkaTopic {
	client, err := sarama.NewClient([]string{"kafka:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Client connected: %v\n", client)
	}

	topic := "http-request"
	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Producer connected: %v\n", producer)
	}
	producable := producer.Input()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Consumer connected: %v\n", consumer)
	}

	consumable, err := consumer.ConsumePartition(topic, 0, 0)
	if err != nil {
		panic(err)
	}

	return &KafkaTopic{client, topic, producer, producable, consumer, consumable}
}
Example #3
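A small helper that derives a master consumer from an existing client and panics on failure.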
func getMasterConsumer(client sarama.Client) sarama.Consumer {
	//fmt.Println("Getting consumer.")
	master, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	}
	return master
}
Example #4
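A CLI subcommand that consumes a single partition: it computes a starting offset relative to the newest offset (or uses an absolute one), then prints messages until n messages have arrived or an interrupt is received.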
func runConsume(cmd *Command, args []string) {
	brokers := brokers()
	config := sarama.NewConfig()
	config.ClientID = "k consume"
	config.Consumer.Return.Errors = true
	client, err := sarama.NewClient(brokers, config)
	must(err)
	defer client.Close()

	consumer, err := sarama.NewConsumerFromClient(client)
	must(err)
	defer consumer.Close()

	signals := make(chan os.Signal, 1)
	defer close(signals)
	signal.Notify(signals, os.Interrupt)

	// calculate a starting offset
	_, newestOffset := offsets(client, topic, partition)
	startingOffset := newestOffset
	if offset < 0 {
		startingOffset = newestOffset + offset
	} else if offset > 0 {
		startingOffset = offset
	}

	// TODO: support consuming all partitions
	fmt.Fprintf(os.Stderr, "Using starting offset: %d\n", startingOffset)
	partConsumer, err := consumer.ConsumePartition(topic, partition, startingOffset)
	must(err)
	defer partConsumer.Close()

	var received, errors int
consumerLoop:
	for {
		select {
		case msg := <-partConsumer.Messages():
			if msg.Key != nil {
				fmt.Printf("%s\t", string(msg.Key))
			}
			fmt.Println(string(msg.Value))
			received++
			if n > 0 && received >= n {
				break consumerLoop
			}
		case err := <-partConsumer.Errors():
			fmt.Fprintf(os.Stderr, "Failed to receive message: %s\n", err)
			errors++
		case <-signals:
			break consumerLoop
		}
	}

	fmt.Fprintf(os.Stderr, "Messages received: %d, errors: %d\n", received, errors)
}
Example #5
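Subscribe derives a consumer from the peer's existing client and keeps a partition consumer that starts at the newest offset.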
// Subscribe prepares the peer to consume messages.
func (k *Peer) Subscribe() error {
	consumer, err := sarama.NewConsumerFromClient(k.client)
	if err != nil {
		return err
	}
	pc, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		return err
	}
	k.consumer = pc
	return nil
}
Example #6
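Consumes either a single partition or every partition of a topic; when lastN is set, the starting offset is moved back lastN messages from the newest offset (clamped to the oldest), and messages are fanned into a shared channel.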
func (this *Peek) simpleConsumeTopic(zkcluster *zk.ZkCluster, kfk sarama.Client, topic string, partitionId int32,
	msgCh chan *sarama.ConsumerMessage) {
	consumer, err := sarama.NewConsumerFromClient(kfk)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	if partitionId == -1 {
		// all partitions
		partitions, err := kfk.Partitions(topic)
		if err != nil {
			panic(err)
		}

		for _, p := range partitions {
			offset := this.offset
			if this.lastN > 0 {
				latestOffset, err := kfk.GetOffset(topic, p, sarama.OffsetNewest)
				swallow(err)

				oldestOffset, err := kfk.GetOffset(topic, p, sarama.OffsetOldest)
				swallow(err)

				offset = latestOffset - this.lastN
				if offset < oldestOffset {
					offset = oldestOffset
				}

				if offset == 0 {
					// no message in store
					return
				}
			}

			go this.consumePartition(zkcluster, kfk, consumer, topic, p, msgCh, offset)
		}

	} else {
		offset := this.offset
		if this.lastN > 0 {
			latestOffset, err := kfk.GetOffset(topic, partitionId, sarama.OffsetNewest)
			swallow(err)
			offset = latestOffset - this.lastN
			if offset < 0 {
				offset = sarama.OffsetOldest
			}
		}
		this.consumePartition(zkcluster, kfk, consumer, topic, partitionId, msgCh, offset)
	}
}
Example #7
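A ListenHandler constructor: it derives a consumer from an existing client, wires up the handler's channels, and starts its run loop.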
func New(kfkConn sarama.Client, transmitter Transmitter) (*ListenHandler, error) {
	consumer, err := sarama.NewConsumerFromClient(kfkConn)
	if err != nil {
		return nil, fmt.Errorf("Failed to create Kafka consumer: %v", err)
	}
	lh := &ListenHandler{
		listenbus:   make(chan chan<- listenRequest),
		transmitter: transmitter,
		consumer:    consumer,
		kill:        killchan.New(),
		dead:        killchan.New(),
	}
	go lh.run()
	return lh, nil
}
Example #8
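A verification tool that shares one client between an async producer and a consumer, runs producer and consumer goroutines concurrently, and prints statistics once both finish.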
func main() {
	flag.Parse()

	if *verbose {
		sarama.Logger = logger
	}

	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Consumer.Return.Errors = true

	client, err := sarama.NewClient(strings.Split(*brokerList, ","), config)
	if err != nil {
		logger.Fatalln("Failed to start Kafka client:", err)
	}
	defer func() {
		if err := client.Close(); err != nil {
			logger.Println("Failed to close client:", err)
		}
	}()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		logger.Fatalln("Failed to start Kafka producer:", err)
	}

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		logger.Fatalln("Failed to start Kafka consumer:", err)
	}

	signal.Notify(shutdown, os.Interrupt, os.Kill, syscall.SIGHUP, syscall.SIGTERM)
	expectations := make(chan *sarama.ProducerMessage, ExpectationBufferSize)

	started := time.Now()

	var verifierWg sync.WaitGroup
	verifierWg.Add(2)
	go expectationProducer(producer, expectations, &verifierWg)
	go expectationConsumer(consumer, expectations, &verifierWg)
	verifierWg.Wait()

	stats.Print()

	logger.Println()
	logger.Printf("Done after %0.2fs.\n", float64(time.Since(started))/float64(time.Second))
}
Example #9
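NewConsumer validates the configuration, creates a client and a consumer from it, selects a broker for the consumer group, and starts the main loop, closing the client if any step fails.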
// NewConsumer initializes a new consumer
func NewConsumer(addrs []string, groupID string, topics []string, config *Config) (*Consumer, error) {
	if config == nil {
		config = NewConfig()
	}

	if err := config.Validate(); err != nil {
		return nil, err
	}

	client, err := sarama.NewClient(addrs, &config.Config)
	if err != nil {
		return nil, err
	}

	csmr, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		_ = client.Close()
		return nil, err
	}

	c := &Consumer{
		config: config,
		client: client,

		csmr: csmr,
		subs: newPartitionMap(),

		groupID: groupID,
		topics:  topics,

		dying: make(chan none),
		dead:  make(chan none),

		errors:        make(chan error, config.ChannelBufferSize),
		messages:      make(chan *sarama.ConsumerMessage, config.ChannelBufferSize),
		notifications: make(chan *Notification, 1),
	}
	if err := c.selectBroker(); err != nil {
		_ = client.Close()
		return nil, err
	}

	go c.mainLoop()
	return c, nil
}
Example #10
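Reads the cluster and topic from environment variables, connects to the cluster's brokers, and consumes every partition of the topic concurrently starting at the newest offset.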
func (this *WatchAppError) consumeAppErrLogs(msgChan chan<- *sarama.ConsumerMessage) error {
	var (
		cluster = os.Getenv("APPLOG_CLUSTER")
		topic   = os.Getenv("APPLOG_TOPIC")
	)

	if cluster == "" || topic == "" {
		return fmt.Errorf("empty cluster/topic params provided, kateway.apperr disabled")
	}

	zkcluster := this.Zkzone.NewCluster(cluster)
	brokerList := zkcluster.BrokerList()
	if len(brokerList) == 0 {
		return fmt.Errorf("cluster[%s] has empty brokers", cluster)
	}
	kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
	if err != nil {
		return err
	}
	defer kfk.Close()

	consumer, err := sarama.NewConsumerFromClient(kfk)
	if err != nil {
		return err
	}
	defer consumer.Close()

	partitions, err := kfk.Partitions(topic)
	if err != nil {
		return err
	}

	var wg sync.WaitGroup
	for _, p := range partitions {
		wg.Add(1)
		go this.consumePartition(zkcluster, consumer, topic, p, sarama.OffsetNewest, msgChan, &wg)
	}

	wg.Wait()
	return nil
}
Example #11
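Recreates the partition consumer for a file at a given offset, reconnecting the underlying Kafka client first if necessary.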
func (f *File) reconsume(offset int64) error {
	if err := f.dir.reconnectKafkaIfNecessary(); err != nil {
		return err
	}

	consumer, err := sarama.NewConsumerFromClient(f.dir.Client)
	if err != nil {
		log.Error(err)

		return err
	}

	cp, err := consumer.ConsumePartition(f.topic, f.partitionId, offset)
	if err != nil {
		log.Error(err)

		return err
	}

	f.consumer = cp
	return nil
}
Example #12
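Pairs a partition consumer with an offset manager: consumption resumes from the offset stored for group "test2", and the offset is marked after each message. Errors from the constructors are ignored for brevity.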
func Consume() {
	config := sarama.NewConfig()
	master, _ := sarama.NewClient([]string{"localhost:9095"}, config)
	consumer, _ := sarama.NewConsumerFromClient(master)

	// Resume from the offset stored for consumer group "test2" on partition 1 of "tt.test".
	offsetManager, _ := sarama.NewOffsetManagerFromClient("test2", master)
	partitionManager, _ := offsetManager.ManagePartition("tt.test", 1)
	offset, meta := partitionManager.NextOffset()
	fmt.Println(offset)
	fmt.Println(meta)

	partitionConsumer, _ := consumer.ConsumePartition("tt.test", 1, offset)
	for i := 0; i < 200; i++ {
		select {
		case message := <-partitionConsumer.Messages():
			// Mark the offset of the next message to consume.
			offset++
			partitionManager.MarkOffset(offset, "tt.test")
			fmt.Println(message)
		case err := <-partitionConsumer.Errors():
			fmt.Println(err)
		}
	}

	partitionManager.MarkOffset(offset, "tt.test")
	partitionConsumer.Close()
	fmt.Println(offset)
	partitionManager.Close()
	consumer.Close()
	master.Close()
}
Example #13
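Sets up a TLS-capable client and a master consumer, starts fan-in and processing goroutines, and opens a partition consumer on every partition of the cluster's offsets topic, starting at the newest offset.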
func NewKafkaClient(app *ApplicationContext, cluster string) (*KafkaClient, error) {
	// Set up sarama config from profile
	clientConfig := sarama.NewConfig()
	profile := app.Config.Clientprofile[app.Config.Kafka[cluster].Clientprofile]
	clientConfig.ClientID = profile.ClientID
	clientConfig.Net.TLS.Enable = profile.TLS
	clientConfig.Net.TLS.Config = &tls.Config{}
	clientConfig.Net.TLS.Config.InsecureSkipVerify = profile.TLSNoVerify

	sclient, err := sarama.NewClient(app.Config.Kafka[cluster].Brokers, clientConfig)
	if err != nil {
		return nil, err
	}

	// Create sarama master consumer
	master, err := sarama.NewConsumerFromClient(sclient)
	if err != nil {
		sclient.Close()
		return nil, err
	}

	client := &KafkaClient{
		app:            app,
		cluster:        cluster,
		client:         sclient,
		masterConsumer: master,
		requestChannel: make(chan *BrokerTopicRequest),
		messageChannel: make(chan *sarama.ConsumerMessage),
		errorChannel:   make(chan *sarama.ConsumerError),
		wgFanIn:        sync.WaitGroup{},
		wgProcessor:    sync.WaitGroup{},
		topicMap:       make(map[string]int),
		topicMapLock:   sync.RWMutex{},
	}

	// Start the main processor goroutines for __consumer_offset messages
	client.wgProcessor.Add(2)
	go func() {
		defer client.wgProcessor.Done()
		for msg := range client.messageChannel {
			go client.processConsumerOffsetsMessage(msg)
		}
	}()
	go func() {
		defer client.wgProcessor.Done()
		for err := range client.errorChannel {
			log.Errorf("Consume error on %s:%v: %v", err.Topic, err.Partition, err.Err)
		}
	}()

	// Start goroutine to handle topic metadata requests. Do this first because the getOffsets call needs this working
	client.RefreshTopicMap()
	go func() {
		for r := range client.requestChannel {
			client.getPartitionCount(r)
		}
	}()

	// Now get the first set of offsets and start a goroutine to continually check them
	client.getOffsets()
	client.brokerOffsetTicker = time.NewTicker(time.Duration(client.app.Config.Tickers.BrokerOffsets) * time.Second)
	go func() {
		for range client.brokerOffsetTicker.C {
			client.getOffsets()
		}
	}()

	// Get a partition count for the consumption topic
	partitions, err := client.client.Partitions(client.app.Config.Kafka[client.cluster].OffsetsTopic)
	if err != nil {
		return nil, err
	}

	// Start consumers for each partition with fan in
	client.partitionConsumers = make([]sarama.PartitionConsumer, len(partitions))
	log.Infof("Starting consumers for %v partitions of %s in cluster %s", len(partitions), client.app.Config.Kafka[client.cluster].OffsetsTopic, client.cluster)
	for i, partition := range partitions {
		pconsumer, err := client.masterConsumer.ConsumePartition(client.app.Config.Kafka[client.cluster].OffsetsTopic, partition, sarama.OffsetNewest)
		if err != nil {
			return nil, err
		}
		client.partitionConsumers[i] = pconsumer
		client.wgFanIn.Add(2)
		go func() {
			defer client.wgFanIn.Done()
			for msg := range pconsumer.Messages() {
				client.messageChannel <- msg
			}
		}()
		go func() {
			defer client.wgFanIn.Done()
			for err := range pconsumer.Errors() {
				client.errorChannel <- err
			}
		}()
	}

	return client, nil
}
Example #14
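Consumes protobuf-encoded sensor readings from partition 0 of the "sensors" topic and writes each reading to InfluxDB as a point.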
func main() {
	flag.Parse()

	if *brokers == "" {
		flag.PrintDefaults()
		os.Exit(1)
	}

	brokerList := strings.Split(*brokers, ",")

	cfg := sarama.NewConfig()

	kafkaClient, e := sarama.NewClient(brokerList, cfg)
	if e != nil {
		panic(e)
	}

	consumer, e := sarama.NewConsumerFromClient(kafkaClient)
	if e != nil {
		panic(e)
	}

	pConsumer, _ := consumer.ConsumePartition("sensors", 0, 0)

	u, _ := url.Parse("http://localhost:8086")
	influxConfig := influx.Config{
		URL:      *u,
		Username: "******",
		Password: "******",
	}
	influxClient, _ := influx.NewClient(influxConfig)
	influxClient.Ping()

	for msg := range pConsumer.Messages() {
		// Decode the SensorData from protobuf string data.
		data := &sensory_proto.SensorData{}
		proto.Unmarshal(msg.Value, data)

		pts := make([]influx.Point, len(data.Reads))

		for i, read := range data.Reads {
			readType := typeToString[read.Type]

			var tm time.Time
			if read.Timestamp == 0 {
				tm = time.Now()
			} else {
				tm = time.Unix(int64(read.Timestamp), 0)
			}

			fields := make(map[string]interface{})
			var (
				intVal *int32   = &read.Intval
				fVal   *float32 = &read.Floatval
			)
			if intVal != nil {
				fields["value"] = *intVal
			}
			if fVal != nil {
				fields["value"] = *fVal
			}
			if sender, ok := senderToString[data.SenderId]; ok {
				fields["sender"] = sender
			}

			point := influx.Point{
				Measurement: readType,
				Fields:      fields,
				Tags:        map[string]string{},
				Time:        tm,
			}

			pts[i] = point
			fmt.Printf("Sender ID %d type(%s) int(%d) float(%f)\n", data.SenderId, readType, read.Intval, read.Floatval)
		}

		bps := influx.BatchPoints{
			Points:          pts,
			Database:        "sensors",
			RetentionPolicy: "default",
		}

		_, err := influxClient.Write(bps)
		if err != nil {
			print("ERROR")
			fmt.Println(err)
		}
	}
}
Example #15
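Consumes the newest messages from every partition of "my_topic" in parallel goroutines, counting messages per partition until an interrupt is received.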
func main() {
	flag.Parse()
	if *verbose {
		sarama.Logger = log.New(os.Stdout, "[Sarama]", log.LstdFlags)
	}

	brokers := getBrokers(*brokerIP, *brokerPorts)
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	client, err := sarama.NewClient(brokers, config)
	if err != nil {
		logger.Fatal(err)
	}

	defer func() {
		if err := client.Close(); err != nil {
			logger.Fatal(err)
		}
	}()

	topic := "my_topic"
	partitions, err := client.Partitions(topic)
	if err != nil {
		logger.Fatal(err)
	}
	master, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		logger.Fatal(err)
	}
	defer func() {
		if err := master.Close(); err != nil {
			logger.Fatal(err)
		}
	}()

	var wg sync.WaitGroup
	wg.Add(len(partitions))
	for _, partition := range partitions {
		go func(master sarama.Consumer, partition int32) {
			defer wg.Done()
			consumer, err := master.ConsumePartition(topic, partition, sarama.OffsetNewest)
			if err != nil {
				logger.Fatal(err)
			}
			signals := make(chan os.Signal, 1)
			signal.Notify(signals, os.Interrupt)

			// Count how many messages have been processed
			msgCount := 0

			// Channel used to signal that consumption is finished
			doneCh := make(chan struct{})
			go func() {
				for {
					select {
					case err := <-consumer.Errors():
						logger.Println(err)
					case msg := <-consumer.Messages():
						msgCount++
						logger.Println("Received messages", string(msg.Key), string(msg.Value))
					case <-signals:
						logger.Println("Interrupt is detected")
						doneCh <- struct{}{}
					}
				}
			}()

			<-doneCh
			fmt.Println("Processed", msgCount, "messages")
		}(master, partition)
	}
	wg.Wait()
}
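All of the examples above follow the same basic pattern: create a client, derive a consumer from it with sarama.NewConsumerFromClient, open a partition consumer, and read from its Messages() channel. The following is a minimal, self-contained sketch of that pattern with error handling and cleanup; the broker address (localhost:9092), topic name ("example"), partition 0, and starting offset are placeholder assumptions, not taken from any example above.

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	// Assumption: a broker is reachable at localhost:9092.
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln("Failed to create client:", err)
	}
	defer client.Close()

	// Derive a consumer from the shared client instead of opening new connections.
	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		log.Fatalln("Failed to create consumer:", err)
	}
	defer consumer.Close()

	// Assumption: the topic "example" exists and has a partition 0.
	pc, err := consumer.ConsumePartition("example", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatalln("Failed to start partition consumer:", err)
	}
	defer pc.Close()

	// Print messages until the partition consumer is closed.
	for msg := range pc.Messages() {
		fmt.Printf("offset=%d key=%s value=%s\n", msg.Offset, msg.Key, msg.Value)
	}
}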