func NewTestKafkaClient(brokers []string) *TestKafkaClient { tkc := &TestKafkaClient{} clientCfg := sarama.NewConfig() clientCfg.ClientID = "unittest-runner" err := error(nil) if tkc.client, err = sarama.NewClient(brokers, clientCfg); err != nil { panic(err) } if tkc.consumer, err = sarama.NewConsumerFromClient(tkc.client); err != nil { panic(err) } return tkc }
// start launches the consumer group's main goroutine via spawn. The goroutine
// creates a sarama consumer, starts the group registry, partition manager and
// dispatcher, then blocks until a stop signal arrives on gc.stoppingCh and
// tears everything down in reverse order. When the goroutine terminates it
// announces itself on stoppedCh so the owner can observe the shutdown.
func (gc *groupConsumer) start(stoppedCh chan<- dispatchTier) {
	spawn(&gc.wg, func() {
		// Always notify the owner on exit, whether shutdown was clean or not.
		defer func() { stoppedCh <- gc }()
		var err error
		gc.dumbConsumer, err = sarama.NewConsumerFromClient(gc.kafkaClient)
		if err != nil {
			// Must never happen.
			panic(ErrConsumerSetup(fmt.Errorf("failed to create sarama.Consumer: err=(%v)", err)))
		}
		gc.registry = spawnConsumerGroupRegister(gc.group, gc.config.ClientID, gc.config, gc.kazooConn)
		// Run partition management in its own goroutine; a dedicated WaitGroup
		// lets us wait for it specifically during shutdown below.
		var manageWg sync.WaitGroup
		spawn(&manageWg, gc.managePartitions)
		gc.dispatcher.start()
		// Wait for a stop signal and shutdown gracefully when one is received.
		<-gc.stoppingCh
		gc.dispatcher.stop()
		gc.registry.stop()
		manageWg.Wait()
		// NOTE(review): Close() error is silently dropped — presumably fine at
		// teardown, but worth confirming/logging.
		gc.dumbConsumer.Close()
	})
}
func NewKafkaHelper(c *C) *KafkaHelper { kh := &KafkaHelper{c: c} cfg := sarama.NewConfig() cfg.Producer.Return.Successes = true cfg.Producer.Return.Errors = true cfg.Consumer.Offsets.CommitInterval = 50 * time.Millisecond cfg.ClientID = "unittest-runner" err := error(nil) if kh.client, err = sarama.NewClient(KafkaPeers, cfg); err != nil { panic(err) } if kh.consumer, err = sarama.NewConsumerFromClient(kh.client); err != nil { panic(err) } if kh.producer, err = sarama.NewAsyncProducerFromClient(kh.client); err != nil { panic(err) } if kh.offsetMgr, err = sarama.NewOffsetManagerFromClient(kh.client); err != nil { panic(err) } return kh }