Example 1
func (t *Transport) Connect() error {

	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	client, err := sarama.NewClient(t.Brokers, config)
	if err != nil {
		return err
	}
	t.client = client

	producer, err := sarama.NewAsyncProducerFromClient(t.client)
	if err != nil {
		return err
	}
	t.producer = producer

	// Consumer configuration
	zkConfig := kafkaClient.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = t.ZookeeperHosts

	consumerConfig := kafkaClient.DefaultConsumerConfig()
	consumerConfig.Coordinator = kafkaClient.NewZookeeperCoordinator(zkConfig)
	consumerConfig.RebalanceMaxRetries = 10
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.AutoOffsetReset = kafkaClient.LargestOffset
	t.consumerConfig = *consumerConfig

	return nil
}
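For context, a minimal sketch of the receiver type this method implies; the field set is inferred from the body above and the real definition may differ:

// Hypothetical receiver shape inferred from Connect(); not from the original source.
type Transport struct {
	Brokers        []string                   // broker addresses handed to sarama.NewClient
	ZookeeperHosts []string                   // assigned to zkConfig.ZookeeperConnect
	client         sarama.Client              // set on successful NewClient
	producer       sarama.AsyncProducer       // set on successful NewAsyncProducerFromClient
	consumerConfig kafkaClient.ConsumerConfig // stored by value, hence the dereference above
}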
Example 2
func (prod *Kafka) tryOpenConnection() bool {
	// Reconnect the client first
	if prod.client == nil {
		if client, err := kafka.NewClient(prod.servers, prod.config); err == nil {
			prod.client = client
		} else {
			Log.Error.Print("Kafka client error:", err)
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
	}

	// Make sure we have a producer up and running
	if prod.producer == nil {
		if producer, err := kafka.NewAsyncProducerFromClient(prod.client); err == nil {
			prod.producer = producer
		} else {
			Log.Error.Print("Kafka producer error:", err)
			prod.client.Close()
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
	}

	prod.Control() <- core.PluginControlFuseActive
	return true
}
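A plausible call site for tryOpenConnection, sketched here as an assumption rather than taken from the original plugin (the retry loop and backoff interval are illustrative):

	// Hypothetical usage: keep retrying until both client and producer are up.
	for !prod.tryOpenConnection() {
		time.Sleep(5 * time.Second) // illustrative backoff, not from the source
	}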
Example 3
func CreateKafkaTopic() *KafkaTopic {
	client, err := sarama.NewClient([]string{"kafka:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Client connected: %v\n", client)
	}

	topic := "http-request"
	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Producer connected: %v\n", producer)
	}
	producable := producer.Input()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	} else {
		fmt.Printf("Kafka Consumer connected: %v\n", consumer)
	}

	// Note: this consumes from literal offset 0, which only works while
	// offset 0 is still retained; sarama.OffsetOldest is the usual choice.
	consumable, err := consumer.ConsumePartition(topic, 0, 0)
	if err != nil {
		panic(err)
	}

	return &KafkaTopic{client, topic, producer, producable, consumer, consumable}
}
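The positional literal in the return statement fixes the field order and types of KafkaTopic but not its names; a plausible definition, with guessed field names:

// Hypothetical definition matching &KafkaTopic{client, topic, producer,
// producable, consumer, consumable}; real field names may differ.
type KafkaTopic struct {
	Client     sarama.Client
	Topic      string
	Producer   sarama.AsyncProducer
	Producable chan<- *sarama.ProducerMessage // producer.Input()
	Consumer   sarama.Consumer
	Consumable sarama.PartitionConsumer // consumer.ConsumePartition(...)
}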
Example 4
func produceNToTopicPartition(t *testing.T, n int, topic string, partition int, brokerAddr string) {
	clientConfig := sarama.NewConfig()
	partitionerFactory := &SaramaPartitionerFactory{NewFixedPartitioner}
	clientConfig.Producer.Partitioner = partitionerFactory.PartitionerConstructor
	clientConfig.Producer.Timeout = 10 * time.Second
	client, err := sarama.NewClient([]string{brokerAddr}, clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}
	encoder := &Int32Encoder{}
	defer producer.Close()
	for i := 0; i < n; i++ {
		key, _ := encoder.Encode(uint32(partition))
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: sarama.ByteEncoder(key), Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
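Int32Encoder is not defined in this snippet; its use above implies an Encode method that serializes a uint32 key into bytes. A plausible sketch, assuming big-endian wire order:

// Hypothetical encoder consistent with encoder.Encode(uint32(partition)).
type Int32Encoder struct{}

func (e *Int32Encoder) Encode(v uint32) ([]byte, error) {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, v) // byte order is an assumption
	return b, nil
}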
Example 5
func runProduce(cmd *Command, args []string) {
	brokers := brokers()
	config := sarama.NewConfig()
	config.ClientID = "k produce"
	config.Producer.Return.Successes = true
	client, err := sarama.NewClient(brokers, config)
	must(err)
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	must(err)

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer close(signals)

	var wg sync.WaitGroup
	var enqueued, successes, errors int

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			fmt.Fprintf(os.Stderr, "Failed to produce message: %s\n", err)
			errors++
		}
	}()

	scanner := bufio.NewScanner(os.Stdin)
producerLoop:
	for scanner.Scan() {
		line := scanner.Text()
		idx := strings.Index(line, "\t")
		var msg *sarama.ProducerMessage
		if idx > 0 {
			msg = &sarama.ProducerMessage{Topic: topic, Key: sarama.ByteEncoder(line[0:idx]), Value: sarama.ByteEncoder(line[idx+1:])}
		} else {
			msg = &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.ByteEncoder(line)}
		}
		select {
		case producer.Input() <- msg:
			enqueued++
		case <-signals:
			break producerLoop
		}
	}

	producer.AsyncClose()
	wg.Wait()
	fmt.Fprintf(os.Stderr, "messages produced: %d, errors: %d\n", successes, errors)
}
Example 6
func main() {
	flag.Parse()

	if *verbose {
		sarama.Logger = logger
	}

	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Consumer.Return.Errors = true

	client, err := sarama.NewClient(strings.Split(*brokerList, ","), config)
	if err != nil {
		logger.Fatalln("Failed to start Kafka client:", err)
	}
	defer func() {
		if err := client.Close(); err != nil {
			logger.Println("Failed to close client:", err)
		}
	}()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		logger.Fatalln("Failed to start Kafka producer:", err)
	}

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		logger.Fatalln("Failed to start Kafka consumer:", err)
	}

	signal.Notify(shutdown, os.Interrupt, os.Kill, syscall.SIGHUP, syscall.SIGTERM)
	expectations := make(chan *sarama.ProducerMessage, ExpectationBufferSize)

	started := time.Now()

	var verifierWg sync.WaitGroup
	verifierWg.Add(2)
	go expectationProducer(producer, expectations, &verifierWg)
	go expectationConsumer(consumer, expectations, &verifierWg)
	verifierWg.Wait()

	stats.Print()

	logger.Println()
	logger.Printf("Done after %0.2fs.\n", float64(time.Since(started))/float64(time.Second))
}
Example 7
func NewSaramaProducer(conf *ProducerConfig) Producer {
	if err := conf.Validate(); err != nil {
		panic(err)
	}

	config := sarama.NewConfig()
	config.ClientID = conf.Clientid
	config.ChannelBufferSize = conf.SendBufferSize
	switch strings.ToLower(conf.CompressionCodec) {
	case "none":
		config.Producer.Compression = sarama.CompressionNone
	case "gzip":
		config.Producer.Compression = sarama.CompressionGZIP
	case "snappy":
		config.Producer.Compression = sarama.CompressionSnappy
	}
	config.Producer.Flush.Bytes = conf.MaxMessageBytes
	config.Producer.Flush.Frequency = conf.FlushTimeout
	config.Producer.Flush.Messages = conf.BatchSize
	config.Producer.Flush.MaxMessages = conf.MaxMessagesPerRequest
	config.Producer.RequiredAcks = sarama.RequiredAcks(conf.Acks)
	config.Producer.Retry.Backoff = conf.RetryBackoff
	config.Producer.Timeout = conf.Timeout
	config.Producer.Return.Successes = conf.AckSuccesses

	partitionerFactory := &SaramaPartitionerFactory{conf.Partitioner}
	config.Producer.Partitioner = partitionerFactory.PartitionerConstructor
	client, err := sarama.NewClient(conf.BrokerList, config)
	if err != nil {
		panic(err)
	}

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	}
	saramaProducer := &SaramaProducer{
		saramaProducer: producer,
		config:         conf,
	}
	saramaProducer.initSuccesses()
	saramaProducer.initErrors()
	saramaProducer.initInput()

	return saramaProducer
}
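The constructor above ends by calling initSuccesses, initErrors, and initInput; a minimal sketch of what such a wrapper could look like, assuming it simply re-exposes the underlying producer's channels (everything beyond the two fields in the literal is a guess):

type SaramaProducer struct {
	saramaProducer sarama.AsyncProducer
	config         *ProducerConfig
	errors         chan *sarama.ProducerError // assumed, filled by initErrors
}

// Hypothetical: forward the underlying error channel to the wrapper's own.
func (p *SaramaProducer) initErrors() {
	p.errors = make(chan *sarama.ProducerError)
	go func() {
		for err := range p.saramaProducer.Errors() {
			p.errors <- err
		}
	}()
}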
Example 8
func main() {
	var fromStart = flag.Bool("fromStart", true, "Read from beginning of file")
	var topic = flag.String("topic", "tailf", "Kafka topic to produce to")
	var clientId = flag.String("clientId", "tailf-client", "Kafka client ID")
	var brokerList = flag.String("brokerList", "127.0.0.1:9092", "Kafka broker list, comma-delimited.")
	var verbose = flag.Bool("verbose", false, "Verbose output")
	flag.Parse()
	if len(flag.Args()) != 1 {
		flag.Usage()
		os.Exit(1)
	}
	var filename = flag.Arg(0)

	follower, err := tailf.Follow(filename, *fromStart)
	if err != nil {
		log.Fatalf("couldn't follow %q: %v", filename, err)
	}
	defer follower.Close()

	clientConfig := sarama.NewConfig()
	clientConfig.ClientID = *clientId
	clientConfig.Producer.Timeout = 10 * time.Second
	client, err := sarama.NewClient(strings.Split(*brokerList, ","), clientConfig)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	}
	defer producer.Close()

	scanner := bufio.NewScanner(follower)
	for scanner.Scan() {
		producer.Input() <- &sarama.ProducerMessage{Topic: *topic, Key: nil, Value: sarama.ByteEncoder(scanner.Bytes())}
		if *verbose {
			log.Println("Produced message:", scanner.Text())
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalf("scanner error: %v", err)
	}
}
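One caveat with this example: sarama's AsyncProducer delivers failures on Errors() by default, and its documentation warns that this channel must be consumed or the producer can eventually deadlock; nothing above reads it. A minimal drain that could be added before the scan loop:

	// Drain producer errors so a full Errors() channel cannot stall the producer.
	go func() {
		for err := range producer.Errors() {
			log.Println("produce error:", err)
		}
	}()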
Example 9
func (this *SyslogProducer) startProducers() {
	brokerList := strings.Split(this.config.BrokerList, ",")
	conf := this.config.ProducerConfig
	config := sarama.NewConfig()
	config.ClientID = conf.Clientid
	config.ChannelBufferSize = conf.SendBufferSize
	switch strings.ToLower(conf.CompressionCodec) {
	case "none":
		config.Producer.Compression = sarama.CompressionNone
	case "gzip":
		config.Producer.Compression = sarama.CompressionGZIP
	case "snappy":
		config.Producer.Compression = sarama.CompressionSnappy
	}
	config.Producer.Flush.Bytes = conf.FlushByteCount
	config.Producer.Flush.Frequency = conf.FlushTimeout
	config.Producer.Flush.Messages = conf.BatchSize
	config.Producer.Flush.MaxMessages = conf.MaxMessagesPerRequest
	config.Producer.MaxMessageBytes = conf.MaxMessageBytes
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.RequiredAcks = sarama.RequiredAcks(conf.Acks)
	config.Producer.Retry.Backoff = conf.RetryBackoff
	config.Producer.Timeout = conf.Timeout

	for i := 0; i < this.config.NumProducers; i++ {
		client, err := sarama.NewClient(brokerList, config)
		if err != nil {
			panic(err)
		}

		Tracef(this, "Starting new producer with config: %#v", config)
		producer, err := sarama.NewAsyncProducerFromClient(client)
		if err != nil {
			panic(err)
		}
		this.producers = append(this.producers, producer)
		go this.produceRoutine(producer)
	}
}
Example 10
func produceN(t *testing.T, n int, topic string, brokerAddr string) {
	clientConfig := sarama.NewConfig()
	clientConfig.Producer.Timeout = 10 * time.Second
	client, err := sarama.NewClient([]string{brokerAddr}, clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()
	for i := 0; i < n; i++ {
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
Example 11
func NewProducer(brokers []string, name string, config *sarama.Config, cb ProducerErrorCallback) (self *Producer, err error) {
	self = &Producer{
		callback: cb,
		config:   config,
		quit:     make(chan bool),
		done:     make(chan bool),
	}
	self.client, err = sarama.NewClient(brokers, nil)
	if err != nil {
		log.Errorf("failed to create client: %s", err)
		return nil, err
	}

	if config == nil {
		self.AsyncProducer, err = sarama.NewAsyncProducerFromClient(self.client)
	} else {
		self.AsyncProducer, err = sarama.NewAsyncProducer(brokers, self.config)
	}

	if err != nil {
		log.Errorf("failed to create producer: %s", err)
		return nil, err
	}

	go self.Start()

	return self, nil
}
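For reference, the assignments in the constructor imply a Producer shape roughly like the following; the embedded AsyncProducer is required by the self.AsyncProducer field name, and the rest mirrors the struct literal above:

// Hypothetical shape inferred from NewProducer; not from the original source.
type Producer struct {
	sarama.AsyncProducer // embedded, so Input()/Errors()/Successes() pass through
	callback ProducerErrorCallback
	client   sarama.Client
	config   *sarama.Config
	quit     chan bool
	done     chan bool
}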
Example 12
// Siesta producer does not yet support compression
func produce(t *testing.T, messages []string, topic string, brokerAddr string, compression sarama.CompressionCodec) {
	clientConfig := sarama.NewConfig()
	clientConfig.Producer.Compression = compression
	clientConfig.Producer.Timeout = 10 * time.Second
	client, err := sarama.NewClient([]string{brokerAddr}, clientConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()
	for _, message := range messages {
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(message)}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
Example 13
func main() {
	fmt.Println(("Starting Producer"))
	runtime.GOMAXPROCS(runtime.NumCPU())
	numMessage := 0

	brokerConnect, topic, sleepTime, graphiteConnect, graphiteFlushInterval, flushMsgCount, flushFrequency, producerCount, maxMessagesPerReq := resolveConfig()

	startMetrics(graphiteConnect, graphiteFlushInterval)
	produceRate := metrics.NewRegisteredMeter("ProduceRate", metrics.DefaultRegistry)

	//kafkaClient.CreateMultiplePartitionsTopic(zkConnect, topic, numPartitions)

	//p := producer.NewKafkaProducer(topic, []string{brokerConnect})

	//defer producer.Close()
	//defer p.Close()

	saramaError := make(chan *sarama.ProducerError)
	saramaSuccess := make(chan *sarama.ProducerMessage)

	clientConfig := sarama.NewConfig()
	clientConfig.ClientID = uuid.NewV1().String()
	clientConfig.Producer.Flush.Messages = flushMsgCount
	clientConfig.Producer.Flush.Frequency = flushFrequency
	clientConfig.Producer.Flush.MaxMessages = maxMessagesPerReq
	clientConfig.Producer.Return.Successes = true
	clientConfig.Producer.RequiredAcks = sarama.NoResponse //WaitForAll
	clientConfig.Producer.Timeout = 1000 * time.Millisecond
	client, err := sarama.NewClient([]string{brokerConnect}, clientConfig)
	if err != nil {
		panic(err)
	}
	for i := 0; i < producerCount; i++ {
		//		config.Compression = 2
		producer, err := sarama.NewAsyncProducerFromClient(client)
		if err != nil {
			panic(err)
		}
		go func() {
			for {
				message := &sarama.ProducerMessage{Topic: topic, Key: sarama.StringEncoder(fmt.Sprintf("%d", numMessage)), Value: sarama.StringEncoder(fmt.Sprintf("message %d!", numMessage))}
				numMessage++
				producer.Input() <- message
				time.Sleep(sleepTime)
			}
		}()

		go func() {
			for {
				select {
				case err := <-producer.Errors():
					saramaError <- err
				case success := <-producer.Successes():
					saramaSuccess <- success
				}
			}
		}()
	}

	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)
	go func() {
		start := time.Now()
		count := 0
		for {
			select {
			case err := <-saramaError:
				fmt.Println(err)
			case <-saramaSuccess:
				produceRate.Mark(1)
				count++
				elapsed := time.Since(start)
				if elapsed.Seconds() >= 1 {
					fmt.Printf("Per second: %d\n", count)
					count = 0
					start = time.Now()
				}
			}
		}
	}()
	<-ctrlc
}
Example 14
func main() {

	flag.Parse()
	//confPath = "/home/wens/go/src/logk/conf.json"
	confFile, err := os.Open(confPath)

	if err != nil {
		if os.IsNotExist(err) {
			fmt.Printf("Cannot find conf: %s\n", confPath)
			os.Exit(1)
		}
		fmt.Printf("Failed to read conf: %s\n", confPath)
		os.Exit(1)
	}

	bb, err := ioutil.ReadAll(confFile)

	if err != nil {
		panic(err)
	}
	conf := new(Conf)
	err = json.Unmarshal(bb, conf)
	if err != nil {
		panic(err)
	}

	if len(conf.KafkaServers) == 0 {
		fmt.Println("Please config kafka servers ")
		os.Exit(1)
	}

	if len(conf.Topic) == 0 {
		fmt.Println("Please config topic ")
		os.Exit(1)
	}

	if len(conf.Tails) == 0 {
		fmt.Println("Please config tails ")
		os.Exit(1)
	}

	for _, tl := range conf.Tails {

		if len(tl.AppName) == 0 {
			fmt.Println("Please config appName ")
			os.Exit(1)
		}

		if len(tl.LogPath) == 0 {
			fmt.Println("Please config logPath ")
			os.Exit(1)
		}
	}

	if conf.BatchSize <= 0 {
		conf.BatchSize = 10
	}

	if len(conf.HostName) == 0 {
		conf.HostName, _ = os.Hostname()
	}

	kconf := s.NewConfig()
	kconf.Net.DialTimeout = time.Second * 30
	kconf.Producer.MaxMessageBytes = 2 * 1024
	kconf.Producer.Retry.Max = 100
	kconf.Producer.Retry.Backoff = time.Second * 10
	kconf.Producer.Timeout = time.Second * 6
	kconf.Producer.Flush.Frequency = time.Second * 5
	kconf.Producer.Flush.Messages = conf.BatchSize

	kclient, err := s.NewClient(conf.KafkaServers, kconf)

	if err != nil {
		panic(err)
	}
	defer kclient.Close()

	if debug {
		fmt.Printf("connect to kafka success . \n")
	}

	producer, err := s.NewAsyncProducerFromClient(kclient)

	if err != nil {
		panic(err)
	}
	defer producer.Close()

	waiGroup.Add(1)
	go func() {
		defer waiGroup.Done()

		for {

			select {
			case e := <-producer.Errors():
				fmt.Println(e.Err)
				time.Sleep(time.Second * 2)
				producer.Input() <- e.Msg
			case <-closeChan:
				return
			default:
			}

			select {
			case r := <-recordChan:
				buf := bytes.NewBuffer(make([]byte, 0))
				buf.WriteString(r.Content)
				buf.WriteString("|#|")
				buf.WriteString(r.AppName)
				buf.WriteString("|#|")
				buf.WriteString(r.LogPath)
				buf.WriteString("|#|")
				buf.WriteString(r.HostName)
				buf.WriteString("|#|")
				buf.WriteString(strconv.FormatInt(r.Timestamp, 10))
				buf.WriteString("|#|")
				bb, err := json.Marshal(r.Tags)
				if err != nil {
					if debug {
						fmt.Println(err)
					}
					continue
				}
				buf.Write(bb)

				runtimeInfo[r.LogPath] = r.Offset
				producer.Input() <- &s.ProducerMessage{
					Topic: conf.Topic,
					Value: s.ByteEncoder(buf.Bytes()),
					Key:   s.StringEncoder(r.AppName),
				}
			case <-closeChan:
				return

			}
		}
	}()

	recoverRuntimeInfo()
	defer saveRuntimeInfo()

	for _, tailConf := range conf.Tails {
		waiGroup.Add(1)
		go tailFile(conf, tailConf, conf.Topic)
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	<-c
	close(closeChan)
	fmt.Println("Waiting for exit.")
	waiGroup.Wait()
	fmt.Println("Exit ok.")

}
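The messages drained from recordChan imply a record type with at least the fields accessed above; a hypothetical sketch (names follow the accesses, the Tags and Offset types are assumptions):

// Hypothetical record type inferred from the field accesses in the send loop.
type Record struct {
	Content   string
	AppName   string            // also used as the message key
	LogPath   string
	HostName  string
	Timestamp int64             // formatted with strconv.FormatInt(r.Timestamp, 10)
	Tags      map[string]string // serialized with json.Marshal; exact type assumed
	Offset    int64             // recorded into runtimeInfo[r.LogPath]
}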