Example #1
func NewKafkaDeliver(store *Store, clientId string, brokerList []string) (*KafkaDeliver, error) {
	log.Println("go=kafka at=new-kafka-deliver")
	clientConfig := sarama.NewClientConfig()
	producerConfig := sarama.NewProducerConfig()

	client, err := sarama.NewClient(clientId, brokerList, clientConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-client")

	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-producer")

	return &KafkaDeliver{
		clientId:          clientId,
		brokerList:        brokerList,
		store:             store,
		producer:          producer,
		producerConfig:    producerConfig,
		client:            client,
		clientConfig:      clientConfig,
		deliverGoroutines: 8,
		shutdownDeliver:   make(chan bool, 8),
		shutdown:          make(chan bool, 8),
	}, nil
}
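The KafkaDeliver struct itself is not shown. Below is a minimal sketch of what it plausibly looks like, with field types inferred from the composite literal above and from the legacy sarama API (where NewClient returned *sarama.Client and NewProducer returned *sarama.Producer); the exact types are assumptions, not the original definition.

// Sketch only: field types are inferred, not taken from the source project.
type KafkaDeliver struct {
	store             *Store
	clientId          string
	brokerList        []string
	client            *sarama.Client
	clientConfig      *sarama.ClientConfig
	producer          *sarama.Producer
	producerConfig    *sarama.ProducerConfig
	deliverGoroutines int
	shutdownDeliver   chan bool
	shutdown          chan bool
}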
Example #2
// kafkaClient initializes a connection to a Kafka cluster and
// initializes one or more clientProducer() (producer instances).
func kafkaClient(n int) {
	switch noop {
	// If not noop, actually fire up Kafka connections and send messages.
	case false:
		cId := "client_" + strconv.Itoa(n)

		conf := kafka.NewConfig()
		if compression != kafka.CompressionNone {
			conf.Producer.Compression = compression
		}
		conf.Producer.Flush.MaxMessages = batchSize

		client, err := kafka.NewClient(brokers, conf)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		} else {
			log.Printf("%s connected\n", cId)
		}
		for i := 0; i < producers; i++ {
			go clientProducer(client)
		}
	// If noop, we're not creating connections at all.
	// Just generate messages and burn CPU.
	default:
		for i := 0; i < producers; i++ {
			go clientDummyProducer()
		}
	}
	<-killClients
}
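This example references several package-level variables that are not shown. Here is a minimal sketch of declarations that would let it compile, assuming sarama is imported under the alias kafka (as the calls suggest); the names come from the example, but the types and comments are assumptions.

// Assumed declarations for the free variables used above.
var (
	noop        bool                   // if true, only burn CPU; no connections
	brokers     []string               // broker addresses, e.g. "localhost:9092"
	producers   int                    // producer goroutines per client
	batchSize   int                    // producer flush batch size
	compression kafka.CompressionCodec // e.g. kafka.CompressionSnappy
	killClients = make(chan bool)      // signaled to shut the clients down
)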
Example #3
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	// Note: construction errors are discarded for brevity throughout this example.
	pubClient, _ := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	subClient, _ := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())

	topic := "test"
	pub, _ := sarama.NewProducer(pubClient, sarama.NewProducerConfig())
	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, _ := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
Example #4
func main() {
	client, err := sarama.NewClient("a_logger_for_mhub", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	} else {
		os.Stderr.WriteString("> connected\n")
	}
	defer client.Close()

	consumer, err := sarama.NewConsumer(client, "received", 0, "", nil)
	if err != nil {
		panic(err)
	} else {
		os.Stderr.WriteString("> consumer ready\n")
	}
	defer consumer.Close()

	for event := range consumer.Events() {
		if event.Err != nil {
			panic(event.Err)
		}
		fmt.Println(utf8.FullRune(event.Value))
	}
}
Example #5
func (this *Topics) clusterSummary(zkcluster *zk.ZkCluster) []topicSummary {
	r := make([]topicSummary, 0, 10)

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return nil
	}
	defer kfk.Close()

	topicInfos, _ := kfk.Topics()
	for _, t := range topicInfos {
		flat := int64(0) // messages currently retained (newest - oldest, per partition)
		cum := int64(0)  // cumulative high-water mark (total ever produced)
		alivePartitions, _ := kfk.WritablePartitions(t)
		for _, partitionID := range alivePartitions {
			latestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetNewest)
			oldestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetOldest)
			flat += (latestOffset - oldestOffset)
			cum += latestOffset
		}

		r = append(r, topicSummary{zkcluster.ZkZone().Name(), zkcluster.Name(), t, len(alivePartitions), flat, cum})
	}

	return r
}
Example #6
func CreateKafkaTopic() *KafkaTopic {
	client, err := sarama.NewClient([]string{"kafka:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Client connected: %v\n", client)
	}

	topic := "http-request"
	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Producer connected: %v\n", producer)
	}
	producable := producer.Input()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Consumer connected: %v\n", consumer)
	}

	consumable, err := consumer.ConsumePartition(topic, 0, 0)
	if err != nil {
		panic(err)
	}

	return &KafkaTopic{client, topic, producer, producable, consumer, consumable}
}
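Because the composite literal at the end is positional, the KafkaTopic fields must appear in exactly this order. A plausible sketch of the struct follows; the field names are assumptions, while the types follow from the calls above (producer.Input() yields a chan<- *sarama.ProducerMessage).

// Sketch only: field names are guesses, types are implied by the constructor.
type KafkaTopic struct {
	Client     sarama.Client
	Topic      string
	Producer   sarama.AsyncProducer
	Producable chan<- *sarama.ProducerMessage
	Consumer   sarama.Consumer
	Consumable sarama.PartitionConsumer
}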
Example #7
func (this *Ping) diagnose() {
	this.zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		registeredBrokers := zkcluster.RegisteredInfo().Roster
		for _, broker := range registeredBrokers {
			log.Debug("ping %s", broker.Addr())

			kfk, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))

				continue
			}

			_, err = kfk.Topics() // kafka didn't provide ping, so use Topics() as ping
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
			} else {
				if !this.problematicMode {
					log.Info("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Green("ok"))
				}
			}
			kfk.Close()
		}
	})
}
Example #8
func newKafkaClient(proc int, brokerList []string, hostname string) (sarama.Client, error) {
	sarama.MaxRequestSize = 100 * 1024 * 1024
	sarama.MaxResponseSize = 100 * 1024 * 1024

	config := sarama.NewConfig()
	config.Net.MaxOpenRequests = proc * 2
	config.Producer.MaxMessageBytes = int(sarama.MaxRequestSize)
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Metadata.RefreshFrequency = 10 * time.Second
	config.ClientID = "indexer"
	// config.Producer.Compression = sarama.CompressionGZIP
	// config.Producer.Flush.MaxMessages = 10000

	cl, err := sarama.NewClient(brokerList, config)
	if err != nil {
		return nil, err
	}

	// partitionerCreator := func(topic string) sarama.Partitioner {
	// return newLocalAwarePartitioner(cl, topic, hostname)
	// }

	// config.Producer.Partitioner = partitionerCreator
	return cl, nil
}
Example #9
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	config := sarama.NewConfig()
	client, _ := sarama.NewClient([]string{"localhost:9092"}, config)

	topic := "test"
	pub, _ := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	consumer, _ := sarama.NewConsumerFromClient(client)
	sub, _ := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
Example #10
func TestSendData(t *testing.T) {
	kafkaClient, err := sarama.NewClient(brokerList, config)
	if err != nil {
		panic(err)
	}
	defer kafkaClient.Close()
	partitionID, err := kafkaClient.Partitions(topicsInit[0])
	convey.Convey("err should be nil", t, func() {
		convey.So(err, convey.ShouldEqual, nil)
	})
	convey.Convey("partitionID should not be nil ", t, func() {
		convey.So(partitionID, convey.ShouldNotEqual, nil)
	})
	offset, err := kafkaClient.GetOffset("test", partitionID[0], sarama.OffsetOldest)
	convey.Convey("err should be nil", t, func() {
		convey.So(err, convey.ShouldEqual, nil)
	})
	producer.NewProducer(brokerList, topicsInit, config)
	producer.SendData(topicsInit[0], "init message")
	offset2, err := kafkaClient.GetOffset("test", partitionID[0], sarama.OffsetOldest)
	convey.Convey("err should be nil", t, func() {
		convey.So(err, convey.ShouldEqual, nil)
	})
	if offset == 0 {
		convey.Convey("offset2 should not be equal to offset ", t, func() {
			convey.So(offset2, convey.ShouldEqual, offset)
		})
	} else {
		convey.Convey("offset2 should not be equal to offset + 1 ", t, func() {
			convey.So(offset2, convey.ShouldEqual, offset+1)
		})
	}
}
Example #11
func Produce(Quit chan bool, Host []string, Topic string, Data chan []byte) {
	client, err := sarama.NewClient("crontab_client", Host, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	} else {
		log.Println("kafka producer connected")
	}
	defer client.Close()

	cfg := sarama.NewProducerConfig()
	cfg.Partitioner = sarama.NewRoundRobinPartitioner
	producer, err := sarama.NewProducer(client, cfg)
	if err != nil {
		panic(err)
	}
	defer producer.Close()
	log.Println("kafka producer ready")

	for {
		select {
		case pack := <-Data:
			producer.Input() <- &sarama.MessageToSend{Topic: Topic, Key: nil, Value: sarama.ByteEncoder(pack)}
		case err := <-producer.Errors():
			log.Println(err)
		case <-Quit:
			// A bare break here would only exit the select, not the for loop.
			return
		}
	}
}
Example #12
func saramaClient() sarama.Client {
	client, err := sarama.NewClient(kafkaPeers, nil)
	if err != nil {
		panic(err)
	}
	return client
}
Example #13
func (prod *Kafka) tryOpenConnection() bool {
	// Reconnect the client first
	if prod.client == nil {
		if client, err := kafka.NewClient(prod.servers, prod.config); err == nil {
			prod.client = client
		} else {
			Log.Error.Print("Kafka client error:", err)
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
	}

	// Make sure we have a producer up and running
	if prod.producer == nil {
		if producer, err := kafka.NewAsyncProducerFromClient(prod.client); err == nil {
			prod.producer = producer
		} else {
			Log.Error.Print("Kafka producer error:", err)
			prod.client.Close()
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
	}

	prod.Control() <- core.PluginControlFuseActive
	return true
}
Example #14
func generateKafkaData(t *testing.T, topic string) {
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{getTestKafkaHost()}, config)
	if err != nil {
		t.Fatalf("%s", err)
	}
	defer client.Close()

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Hello World"),
	}

	_, _, err = producer.SendMessage(msg)
	if err != nil {
		t.Errorf("FAILED to send message: %s\n", err)
	}

	client.RefreshMetadata(topic)
}
Example #15
func getClient(brokers []string) sarama.Client {
	client, err := sarama.NewClient(brokers, nil)
	if err != nil {
		panic(err)
	}
	return client
}
Example #16
func (this *Peek) consumeCluster(zkcluster *zk.ZkCluster, topicPattern string,
	partitionId int, msgChan chan *sarama.ConsumerMessage) {
	brokerList := zkcluster.BrokerList()
	if len(brokerList) == 0 {
		return
	}
	kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}
	//defer kfk.Close() // FIXME how to close it

	topics, err := kfk.Topics()
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}

	for _, t := range topics {
		if patternMatched(t, topicPattern) {
			go this.simpleConsumeTopic(zkcluster, kfk, t, int32(partitionId), msgChan)
		}
	}
}
Example #17
func (t *Transport) Connect() error {

	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	client, err := sarama.NewClient(t.Brokers, config)
	if err != nil {
		return err
	}
	t.client = client

	producer, err := sarama.NewAsyncProducerFromClient(t.client)
	if err != nil {
		return err
	}
	t.producer = producer

	// Consumer configuration
	zkConfig := kafkaClient.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = t.ZookeeperHosts

	consumerConfig := kafkaClient.DefaultConsumerConfig()
	consumerConfig.Coordinator = kafkaClient.NewZookeeperCoordinator(zkConfig)
	consumerConfig.RebalanceMaxRetries = 10
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.AutoOffsetReset = kafkaClient.LargestOffset
	t.consumerConfig = *consumerConfig

	return nil
}
Example #18
func produceNToTopicPartition(t *testing.T, n int, topic string, partition int, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	partitionerFactory := &SaramaPartitionerFactory{NewFixedPartitioner}
	producerConfig.Partitioner = partitionerFactory.PartitionerConstructor
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()
	encoder := &Int32Encoder{}
	for i := 0; i < n; i++ {
		key, _ := encoder.Encode(uint32(partition))
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: sarama.ByteEncoder(key), Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
Example #19
// Creates a KafkaLogger for a given kafka cluster. We identify ourselves with clientId.
func NewKafkaLogger(clientId string, brokers []string) (request_handler.SpadeEdgeLogger, error) {
	c, err := sarama.NewClient(clientId, brokers, sarama.NewClientConfig())
	if err != nil {
		return nil, err
	}

	config := sarama.NewProducerConfig()
	config.Partitioner = sarama.NewRoundRobinPartitioner
	config.FlushFrequency = 500 * time.Millisecond
	config.FlushMsgCount = 1000
	// Might want to try out compression
	config.Compression = sarama.CompressionNone
	config.AckSuccesses = true

	p, err := NewProducer(c, GetTopic(), config)
	if err != nil {
		return nil, err
	}

	k := &KafkaLogger{
		Producer: p,
	}
	hystrix.ConfigureCommand(hystrixCommandName, hystrix.CommandConfig{
		Timeout:               1000,
		MaxConcurrentRequests: hystrixConcurrencyLevel,
		ErrorPercentThreshold: 10,
	})
	return k, nil
}
Example #20
func (this *Clusters) clusterSummary(zkcluster *zk.ZkCluster) (brokers, topics, partitions int, flat, cum int64) {
	brokerInfos := zkcluster.Brokers()
	brokers = len(brokerInfos)

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return
	}
	defer kfk.Close()

	topicInfos, _ := kfk.Topics()
	topics = len(topicInfos)
	for _, t := range topicInfos {
		alivePartitions, _ := kfk.WritablePartitions(t)
		partitions += len(alivePartitions)

		for _, partitionID := range alivePartitions {
			latestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetNewest)
			oldestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetOldest)
			flat += (latestOffset - oldestOffset)
			cum += latestOffset
		}
	}

	return
}
Example #21
func Start(zkstr string) {

	kafkaCluster, err := NewCluster(zkstr)
	if err != nil {
		panic(err)
	}

	seedBroker := kafkaCluster.SeedBroker()

	log.Println("Seedbroker: " + seedBroker)

	// initialize logic
	client, err := sarama.NewClient([]string{seedBroker}, nil)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer client.Close()

	content := NewTopicScreen(kafkaCluster, client, seedBroker)

	topicScreen := NewScreen(content)

	log.Println("showing the topic screen now")
	topicScreen.Show()
	topicScreen.WaitForExit()
}
Example #22
func main() {
	client, err := kafka.NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> connected")
	}
	defer client.Close()

	consumer, err := kafka.NewConsumer(client, "my_topic", 0, "my_consumer_group", kafka.NewConsumerConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> consumer ready")
	}
	defer consumer.Close()

	msgCount := 0
consumerLoop:
	for {
		select {
		case event := <-consumer.Events():
			if event.Err != nil {
				panic(event.Err)
			}
			msgCount++
		case <-time.After(5 * time.Second):
			fmt.Println("> timed out")
			break consumerLoop
		}
	}
	fmt.Println("Got", msgCount, "messages.")
}
Example #23
// NewClient returns a Kafka client
func NewClient(addresses []string) (sarama.Client, error) {
	config := sarama.NewConfig()
	hostname, err := os.Hostname()
	if err != nil {
		hostname = ""
	}
	config.ClientID = hostname
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true

	var client sarama.Client
	retries := outOfBrokersRetries + 1
	for retries > 0 {
		client, err = sarama.NewClient(addresses, config)
		retries--
		if err == sarama.ErrOutOfBrokers {
			glog.Errorf("Can't connect to the Kafka cluster at %s (%d retries left): %s",
				addresses, retries, err)
			time.Sleep(outOfBrokersBackoff)
		} else {
			break
		}
	}
	return client, err
}
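A hypothetical call site for the retry-aware constructor above; the broker address and the fatal-exit handling are placeholders, not part of the original code.

// Hypothetical usage; "localhost:9092" is a placeholder address.
client, err := NewClient([]string{"localhost:9092"})
if err != nil {
	glog.Fatalf("giving up on Kafka: %v", err)
}
defer client.Close()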
Example #24
func testSarama(topic string, partition int32, seconds int) {
	stop := false

	config := sarama.NewClientConfig()
	client, err := sarama.NewClient("siesta", []string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}

	messageChannel := make(chan *sarama.MessageSet, 10000)
	count := 0
	go func() {
		for {
			set := <-messageChannel
			count += len(set.Messages)
		}
	}()

	broker, err := client.Leader(topic, partition)
	if err != nil {
		panic(err)
	}

	// warm up
	fmt.Println("warming up")
	for i := 0; i < 5; i++ {
		fetchRequest := new(sarama.FetchRequest)
		fetchRequest.MinBytes = 1
		fetchRequest.MaxWaitTime = 100
		fetchRequest.AddBlock(topic, partition, 0, 500)

		broker.Fetch("siesta", fetchRequest)
	}
	fmt.Println("warm up finished, starting")

	go func() {
		time.Sleep(time.Duration(seconds) * time.Second)
		stop = true
	}()

	offset := int64(0)
	for !stop {
		fetchRequest := new(sarama.FetchRequest)
		fetchRequest.MinBytes = 1
		fetchRequest.MaxWaitTime = 100
		fetchRequest.AddBlock(topic, partition, offset, 500)

		response, err := broker.Fetch("siesta", fetchRequest)
		if err != nil {
			panic(err)
		}
		set := response.Blocks[topic][partition].MsgSet
		messageChannel <- &set
		offset = set.Messages[len(set.Messages)-1].Offset
	}

	fmt.Printf("%d within %d secnods\n", count, seconds)
	fmt.Printf("%d average\n", count/seconds)
}
Example #25
func (this *Brokers) clusterBrokers(zone, cluster string, brokers map[string]*zk.BrokerZnode) []string {
	if !patternMatched(cluster, this.cluster) {
		return nil
	}

	if len(brokers) == 0 {
		return []string{fmt.Sprintf("%s|%s|%s|%s|%s",
			zone, cluster, " ", color.Red("empty brokers"), " ")}
	}

	lines := make([]string, 0, len(brokers))
	if this.staleOnly {
		// try each broker's aliveness
		for brokerId, broker := range brokers {
			cf := sarama.NewConfig()
			cf.Net.ReadTimeout = time.Second * 4
			cf.Net.WriteTimeout = time.Second * 4
			kfk, err := sarama.NewClient([]string{broker.Addr()}, cf)
			if err != nil {
				lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s",
					zone, cluster,
					brokerId, broker.Addr(),
					fmt.Sprintf("%s: %v", gofmt.PrettySince(broker.Uptime()), err)))
			} else {
				kfk.Close()
			}
		}

		return lines
	}

	// sort by broker id
	sortedBrokerIds := make([]string, 0, len(brokers))
	for brokerId := range brokers {
		sortedBrokerIds = append(sortedBrokerIds, brokerId)
	}
	sort.Strings(sortedBrokerIds)

	for _, brokerId := range sortedBrokerIds {
		b := brokers[brokerId]
		uptime := gofmt.PrettySince(b.Uptime())
		if time.Since(b.Uptime()) < time.Hour*24*7 {
			uptime = color.Green(uptime)
		}
		if this.ipInNumber {
			lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s",
				zone, cluster,
				brokerId, b.Addr(),
				uptime))
		} else {
			lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s",
				zone, cluster,
				brokerId, b.NamedAddr(),
				uptime))
		}
	}
	return lines
}
Example #26
func runProduce(cmd *Command, args []string) {
	brokers := brokers()
	config := sarama.NewConfig()
	config.ClientID = "k produce"
	config.Producer.Return.Successes = true
	client, err := sarama.NewClient(brokers, config)
	must(err)
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	must(err)

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer close(signals)

	var wg sync.WaitGroup
	var enqueued, successes, errors int

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			fmt.Fprintf(os.Stderr, "Failed to produce message: %s\n", err)
			errors++
		}
	}()

	scanner := bufio.NewScanner(os.Stdin)
producerLoop:
	for scanner.Scan() {
		line := scanner.Text()
		idx := strings.Index(line, "\t")
		var msg *sarama.ProducerMessage
		if idx > 0 {
			msg = &sarama.ProducerMessage{Topic: topic, Key: sarama.ByteEncoder(line[0:idx]), Value: sarama.ByteEncoder(line[idx+1:])}
		} else {
			msg = &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.ByteEncoder(line)}
		}
		select {
		case producer.Input() <- msg:
			enqueued++
		case <-signals:
			break producerLoop
		}
	}

	producer.AsyncClose()
	wg.Wait()
	fmt.Fprintf(os.Stderr, "messages produced: %d, errors: %d\n", successes, errors)
}
Example #27
func (b *Broker) initBC() error {
	var err error
	b.bc, err = sarama.NewClient(b.config.brokerServerList, b.brokerConfig)
	if err != nil {
		return err
	}
	return nil
}
Example #28
func (cg *ConsumerGroup) GetNewestOffset(topic string, partition int32, brokers []string) (int64, error) {
	client, err := sarama.NewClient(brokers, nil)
	if err != nil {
		return 0, err
	}
	defer client.Close()
	return client.GetOffset(topic, partition, sarama.OffsetNewest)
}
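A hypothetical call, assuming cg is an initialized *ConsumerGroup; the topic, partition, and broker list are placeholders. Note that sarama's GetOffset with OffsetNewest reports the offset the next produced message will receive, not the offset of the last message written.

// Hypothetical usage with placeholder arguments.
newest, err := cg.GetNewestOffset("my_topic", 0, []string{"localhost:9092"})
if err != nil {
	log.Fatal(err)
}
log.Printf("next offset on my_topic/0: %d", newest)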
Example #29
File: topic.go Project: fgeller/kt
func topicRun(closer chan struct{}) {
	var err error
	if config.topic.verbose {
		sarama.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}

	conf := sarama.NewConfig()
	conf.Version = config.topic.version
	u, err := user.Current()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read current user err=%v", err)
	}
	conf.ClientID = "kt-topic-" + u.Username
	if config.topic.verbose {
		fmt.Fprintf(os.Stderr, "sarama client configuration %#v\n", conf)
	}

	client, err := sarama.NewClient(config.topic.brokers, conf)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create client err=%v\n", err)
		os.Exit(1)
	}
	defer client.Close()

	allTopics, err := client.Topics()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read topics err=%v\n", err)
		os.Exit(1)
	}

	topics := []string{}
	for _, t := range allTopics {
		if config.topic.filter.MatchString(t) {
			topics = append(topics, t)
		}
	}

	out := make(chan string)
	go func() {
		for m := range out {
			fmt.Println(m)
		}
	}()

	var wg sync.WaitGroup
	for _, tn := range topics {
		wg.Add(1)
		go func(t string) {
			printTopic(client, t, out)
			wg.Done()
		}(tn)
	}
	wg.Wait()
}
Example #30
// NewPeer creates and returns a new Peer for communicating with Kafka.
func NewPeer(host string) (*Peer, error) {
	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}

	producer, err := sarama.NewAsyncProducer([]string{host}, config)
	if err != nil {
		return nil, err
	}

	consumer, err := sarama.NewConsumer([]string{host}, config)
	if err != nil {
		return nil, err
	}

	partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		return nil, err
	}

	return &Peer{
		client:   client,
		producer: producer,
		consumer: partitionConsumer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}
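For reference, a minimal sketch of the Peer struct this constructor fills in; the field types are inferred from the sarama calls above and are assumptions. Note that the Consumer created mid-function is not retained: only its PartitionConsumer is stored, and topic is evidently a package-level variable.

// Sketch only: inferred from the composite literal, not the source project.
type Peer struct {
	client   sarama.Client
	producer sarama.AsyncProducer
	consumer sarama.PartitionConsumer
	send     chan []byte
	errors   chan error
	done     chan bool
}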