Code example #1
func generateKafkaData(t *testing.T, topic string) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by a SyncProducer
	client, err := sarama.NewClient([]string{getTestKafkaHost()}, config)
	if err != nil {
		// Fail fast: continuing with a nil client would panic below.
		t.Fatalf("failed to create client: %s", err)
	}
	defer client.Close()

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		t.Fatalf("failed to create producer: %s", err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Hello World"),
	}

	_, _, err = producer.SendMessage(msg)
	if err != nil {
		t.Errorf("FAILED to send message: %s\n", err)
	}

	if err := client.RefreshMetadata(topic); err != nil {
		t.Errorf("failed to refresh metadata: %s", err)
	}
}
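The helper getTestKafkaHost() is referenced above but not shown. A minimal sketch, assuming the broker address comes from an environment variable (the variable name KAFKA_HOST and the localhost default are assumptions; it needs the os import):

// Hypothetical sketch of getTestKafkaHost; env var name and default are assumptions.
func getTestKafkaHost() string {
	if host := os.Getenv("KAFKA_HOST"); host != "" {
		return host
	}
	return "localhost:9092"
}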
Code example #2
File: client.go Project: powellchristoph/rsekafka
func getProducer(client sarama.Client) sarama.SyncProducer {
	//fmt.Println("Getting producer.")
	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		panic(err)
	}
	return producer
}
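A minimal usage sketch for getProducer, assuming a locally reachable broker (the address, function name, and topic are assumptions; note that sarama's SyncProducer requires Producer.Return.Successes to be true):

// Hypothetical usage of getProducer; broker address and topic are assumptions.
func sendExample() error {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by a SyncProducer
	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
	if err != nil {
		return err
	}
	defer client.Close()

	producer := getProducer(client)
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: "events",
		Value: sarama.StringEncoder("payload"),
	}
	_, _, err = producer.SendMessage(msg)
	return err
}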
Code example #3
File: sangrenel.go Project: prezi/sangrenel
// clientProducer generates random messages and writes to Kafka.
// Workers track and limit message rates using incrSent() and fetchSent().
// By default, 5 instances of clientProducer are created under each Kafka client.
func clientProducer(c kafka.Client) {
	producer, err := kafka.NewSyncProducerFromClient(c)
	if err != nil {
		// Without a producer there is nothing to do; bail out rather than
		// panic on the nil producer below.
		log.Println(err.Error())
		return
	}
	defer producer.Close()

	// Instantiate rand per producer to avoid mutex contention.
	source := rand.NewSource(time.Now().UnixNano())
	generator := rand.New(source)
	msgData := make([]byte, msgSize)

	// Use a local accumulator then periodically update global counter.
	// Global counter can become a bottleneck with too many threads.
	tick := time.Tick(3 * time.Millisecond)
	var n int64

	for {
		// The message rate limit works by having all clientProducer loops
		// increment a global counter and track the aggregate per-second progress.
		// If the configured rate is met, the worker sleeps
		// for the remainder of the 1-second window.
		rateEnd := time.Now().Add(time.Second)
		countStart := fetchSent()
		var start time.Time
		for fetchSent()-countStart < msgRate {
			randMsg(msgData, *generator)
			msg := &kafka.ProducerMessage{Topic: topic, Value: kafka.ByteEncoder(msgData)}
			// Timing starts after the message is created, so latency is
			// measured from when the message is sent to when the ack is received.
			start = time.Now()
			_, _, err = producer.SendMessage(msg)
			if err != nil {
				log.Println(err)
			} else {
				// Increment global sent count and fire off time since start value into the latency channel.
				n++
				select {
				case <-tick:
					incrSent(n)
					n = 0
				default:
					break
				}
				latencies <- time.Since(start).Seconds() * 1000
			}
		}
		// If the global per-second rate limit was met,
		// the inner loop breaks and the outer loop sleeps for the second remainder.
		time.Sleep(rateEnd.Sub(time.Now()) + time.Since(start))
	}
}
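incrSent() and fetchSent() are referenced above but not shown. A minimal sketch of a shared counter consistent with how the comments describe them, using sync/atomic (an assumption, not necessarily sangrenel's actual implementation):

// Hypothetical shared message counter; sangrenel's real implementation may differ.
var sentCntr int64

// incrSent adds n to the global count of sent messages.
func incrSent(n int64) {
	atomic.AddInt64(&sentCntr, n)
}

// fetchSent returns the current global count of sent messages.
func fetchSent() int64 {
	return atomic.LoadInt64(&sentCntr)
}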
Code example #4
File: transmithandler.go Project: PieterD/crap
func New(kfkConn sarama.Client) (*TransmitHandler, error) {
	producer, err := sarama.NewSyncProducerFromClient(kfkConn)
	if err != nil {
		return nil, err
	}
	th := &TransmitHandler{
		kill:     killchan.New(),
		dead:     killchan.New(),
		bus:      make(chan Transmit),
		producer: producer,
	}
	go th.run()
	return th, nil
}
Code example #5
// Seed messages
func testSeed(n int) error {
	producer, err := sarama.NewSyncProducerFromClient(testClient)
	if err != nil {
		return err
	}

	for i := 0; i < n; i++ {
		kv := sarama.StringEncoder(fmt.Sprintf("PLAINDATA-%08d", i))
		for _, t := range testTopics {
			msg := &sarama.ProducerMessage{Topic: t, Key: kv, Value: kv}
			if _, _, err := producer.SendMessage(msg); err != nil {
				return err
			}
		}
	}
	return producer.Close()
}
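testClient and testTopics are package-level fixtures that are not part of this example. A minimal sketch of how they could be initialized (the broker address, topic names, and function name are assumptions):

// Hypothetical test fixtures; broker address and topic names are assumptions.
var (
	testTopics = []string{"test-a", "test-b"}
	testClient sarama.Client
)

func initTestClient() error {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required for the SyncProducer above
	var err error
	testClient, err = sarama.NewClient([]string{"localhost:9092"}, config)
	return err
}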
Code example #6
// NewKafkaDeliver creates a connection to Kafka and returns a configured KafkaDeliver.
func NewKafkaDeliver(store *Store, clientID string, brokerList []string) (*KafkaDeliver, error) {

	config := sarama.NewConfig()

	config.ClientID = clientID
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Net.DialTimeout = 2 * time.Second // real connect time is about 4* DialTimeout (?)

	client, err := sarama.NewClient(brokerList, config)
	if err != nil {
		return nil, err
	}

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			log.Errorf("go=kafka at=defer-close-producer after error %v\n", err)
			if err := producer.Close(); err != nil {
				log.Errorf("go=kafka at=producer-close fatal error %v\n", err)
				os.Exit(4)
			}
		}
	}()

	return &KafkaDeliver{
		clientID:          clientID,
		brokerList:        brokerList,
		store:             store,
		producer:          producer,
		client:            client,
		config:            config,
		deliverGoroutines: maxDeliverGoroutines,
		shutdownDeliver:   make(chan bool, maxDeliverGoroutines),
		shutdown:          make(chan bool, maxDeliverGoroutines),
	}, nil

}
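The KafkaDeliver struct itself is not shown. A sketch of a definition consistent with the constructor above (field types are inferred from how the fields are assigned; the Store type and channel element types are taken as-is from the code):

// Hypothetical struct definition inferred from the constructor above.
type KafkaDeliver struct {
	clientID          string
	brokerList        []string
	store             *Store
	producer          sarama.SyncProducer
	client            sarama.Client
	config            *sarama.Config
	deliverGoroutines int
	shutdownDeliver   chan bool
	shutdown          chan bool
}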
Code example #7
File: kafka.go Project: micro/go-plugins
func (k *kBroker) Connect() error {
	if k.c != nil {
		return nil
	}

	c, err := sarama.NewClient(k.addrs, sarama.NewConfig())
	if err != nil {
		return err
	}

	k.c = c

	p, err := sarama.NewSyncProducerFromClient(c)
	if err != nil {
		return err
	}

	k.p = p

	config := sc.NewConfig()
	// TODO: make configurable offset as SubscriberOption
	config.Config.Consumer.Offsets.Initial = sarama.OffsetNewest

	cs, err := sc.NewClient(k.addrs, config)
	if err != nil {
		return err
	}

	k.sc = cs
	// TODO: TLS
	/*
		opts.Secure = k.opts.Secure
		opts.TLSConfig = k.opts.TLSConfig

		// secure might not be set
		if k.opts.TLSConfig != nil {
			opts.Secure = true
		}
	*/
	return nil
}
Code example #8
File: kafka.go Project: bunin/Flotilla
// NewPeer creates and returns a new Peer for communicating with Kafka.
func NewPeer(host string) (*Peer, error) {
	sarama.Logger = log.New(os.Stdout, "DEBUG: ", log.Ldate|log.Ltime|log.Lshortfile)

	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	config.Consumer.Fetch.Default = 10 * 1024 * 1024
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}

	return &Peer{
		client:   client,
		producer: producer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}
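The Peer's send, errors, and done channels imply a background send loop elsewhere in the file. A minimal sketch of what such a loop could look like (the method name and topic parameter are assumptions, not Flotilla's actual code):

// Hypothetical send loop; method name and topic handling are assumptions.
func (p *Peer) sendLoop(topic string) {
	for {
		select {
		case data := <-p.send:
			msg := &sarama.ProducerMessage{Topic: topic, Value: sarama.ByteEncoder(data)}
			if _, _, err := p.producer.SendMessage(msg); err != nil {
				p.errors <- err
			}
		case <-p.done:
			return
		}
	}
}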
Code example #9
File: kafka.go Project: uswitch/syslogger
func newProducerFromZookeeper() (sarama.Client, sarama.SyncProducer, error) {
	brokers, err := kafkazk.LookupBrokers(cfg.zkstring)
	if err != nil {
		return nil, nil, err
	}

	brokerStr := make([]string, len(brokers))
	for i, b := range brokers {
		brokerStr[i] = fmt.Sprintf("%s:%d", b.Host, b.Port)
	}

	logger.Println("connecting to Kafka, using brokers from ZooKeeper:", brokerStr)
	client, err := sarama.NewClient(brokerStr, sarama.NewConfig())
	if err != nil {
		return nil, nil, err
	}

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		return nil, nil, err
	}

	return client, producer, nil
}
Code example #10
File: indexer.go Project: jackdoe/no
func main() {
	var proc = flag.Int("proc", 16, "max concurrency")
	var srv = flag.String("srv", "0.0.0.0:8004", "server socket")
	var netprofile = flag.Bool("netprofile", false, "open socket for remote profiling")
	var brokers = flag.String("brokers", "./brokers", "file containing Kafka brokers to connect to")
	flag.Parse()

	if *netprofile {
		go func() {
			trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
				return true, true
			}
			addr := "0.0.0.0:6061"
			log.Println("start debug HTTP server", addr)
			log.Println(http.ListenAndServe(addr, nil))
		}()
	}

	statInit()
	hostname, _ := os.Hostname()
	log.Println("GOMAXPROCS", runtime.GOMAXPROCS(0))
	log.Println("hostname", hostname)

	brokerContent, err := ioutil.ReadFile(*brokers)
	if err != nil {
		log.Fatalf("failed to read file '%s': %v", brokers, err)
	}

	brokerList := strings.Split(strings.TrimSpace(string(brokerContent)), "\n")
	log.Printf("kafka brokers: %s", strings.Join(brokerList, ", "))

	kafkaClient, err := newKafkaClient(*proc, brokerList, hostname)
	if err != nil {
		log.Fatalf("failed to connect to kafka: %v", err)
	}

	defer kafkaClient.Close()
	dataProducer, err := sarama.NewSyncProducerFromClient(kafkaClient)
	if err != nil {
		log.Fatalf("failed to create data producer: %v", err)
	}
	defer dataProducer.Close()
	indexProducer, err := sarama.NewSyncProducerFromClient(kafkaClient)
	if err != nil {
		log.Fatalf("failed to create index producer: %v", err)
	}
	defer indexProducer.Close()

	wg := &sync.WaitGroup{}
	tickCh := time.Tick(time.Second)
	indexDumperCh := startIndexDumper(indexProducer, wg)
	indexBuilderCh := startIndexBuilder(*proc, indexDumperCh, tickCh, wg)

	log.Println("listen to", *srv)
	conn, err := net.Listen("tcp", *srv)
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}

	grpcServer := grpc.NewServer(grpc.MaxConcurrentStreams(uint32(*proc * 2)))
	pb.RegisterIndexerServer(grpcServer, &indexerServer{dataProducer, indexBuilderCh})

	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-c
		log.Println("stopping gRPC")
		grpcServer.Stop()
	}()

	grpcServer.Serve(conn)
	time.Sleep(100 * time.Millisecond) // let gRPC's goroutines complete
	close(indexBuilderCh)

	log.Println("waiting completion of goroutines")
	wg.Wait()
	log.Println("bye")
}