// Esempio n. 1 (Example no. 1)
// 0
// newProducer builds a KafkaProducer that serializes record values with
// valueSerializer. When Config.ProducerProperties names a properties file,
// producer settings are loaded from it and the connector's broker list is
// taken from its "bootstrap.servers" entry; otherwise a default producer
// config is used with brokers from Config.BrokerList.
func (e *Executor) newProducer(valueSerializer func(interface{}) ([]byte, error)) (*producer.KafkaProducer, error) {
	connectorConfig := siesta.NewConnectorConfig()

	if Config.ProducerProperties == "" {
		// No properties file: defaults plus the comma-separated broker list.
		producerConfig := producer.NewProducerConfig()
		connectorConfig.BrokerList = strings.Split(Config.BrokerList, ",")

		connector, err := siesta.NewDefaultConnector(connectorConfig)
		if err != nil {
			return nil, err
		}
		return producer.NewKafkaProducer(producerConfig, producer.ByteSerializer, valueSerializer, connector), nil
	}

	producerConfig, err := producer.ProducerConfigFromFile(Config.ProducerProperties)
	if err != nil {
		return nil, err
	}

	// The raw property map is loaded a second time solely to pull out
	// "bootstrap.servers" for the connector configuration.
	c, err := cfg.LoadNewMap(Config.ProducerProperties)
	if err != nil {
		return nil, err
	}
	connectorConfig.BrokerList = strings.Split(c["bootstrap.servers"], ",")

	connector, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		return nil, err
	}
	return producer.NewKafkaProducer(producerConfig, producer.ByteSerializer, valueSerializer, connector), nil
}
// startProducers creates this.config.NumProducers Kafka producers, each built
// from the producer properties file named by this.config.ProducerConfig, and
// starts one produce goroutine per producer. Panics on any configuration or
// connector error (startup-time failure).
func (this *MirrorMaker) startProducers() {
	for i := 0; i < this.config.NumProducers; i++ {
		conf, err := producer.ProducerConfigFromFile(this.config.ProducerConfig)
		if err != nil {
			panic(err)
		}
		if this.config.PreservePartitions {
			// Route each message to the same partition it came from.
			conf.Partitioner = producer.NewManualPartitioner()
		}
		connectorConfig := siesta.NewConnectorConfig()
		connectorConfig.BrokerList = conf.BrokerList
		connector, err := siesta.NewDefaultConnector(connectorConfig)
		if err != nil {
			panic(err)
		}

		// Named p (not "producer") so the producer package is not shadowed
		// for the remainder of the loop body.
		p := producer.NewKafkaProducer(conf, this.config.KeyEncoder, this.config.ValueEncoder, connector)
		this.producers = append(this.producers, p)
		if this.config.PreserveOrder {
			// Dedicated routine index per producer keeps per-producer ordering.
			go this.produceRoutine(p, i)
		} else {
			go this.produceRoutine(p, 0)
		}
	}
}
// TestMetricsEmitter verifies that consumer metrics emitted via a
// CodahaleKafkaReporter end up on a Kafka topic: it creates a fresh
// single-partition topic, wires the consumer's metrics JSON writer to a
// reporter producing to that topic, and expects to consume one message.
func TestMetricsEmitter(t *testing.T) {
	partitions := 1
	// Unique topic name per run to avoid cross-test interference.
	topic := fmt.Sprintf("testMetricsEmitter-%d", time.Now().Unix())

	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)

	consumeMessages := 1
	consumeStatus := make(chan int)
	delayTimeout := 10 * time.Second

	metricsProducerConfig := producer.NewProducerConfig()
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = []string{localBroker}
	reporter, err := NewCodahaleKafkaReporter(topic, schemaRepositoryUrl, metricsProducerConfig, connectorConfig)
	assert(t, err, nil)

	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	// Periodically dump metrics JSON to the reporter, which produces to topic.
	go consumer.Metrics().WriteJSON(10*time.Second, reporter)
	go consumer.StartStatic(map[string]int{topic: 1})

	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}

	closeWithin(t, delayTimeout, consumer)
}
// TestLogEmitter verifies that a KafkaLogEmitter publishes log records to a
// Kafka topic: it creates a fresh single-partition topic, emits one Info-level
// message through the emitter, and expects a consumer on that topic to
// receive exactly one message.
func TestLogEmitter(t *testing.T) {
	partitions := 1
	// Unique topic name per run to avoid cross-test interference.
	topic := fmt.Sprintf("testLogEmitter-%d", time.Now().Unix())

	CreateMultiplePartitionsTopic(localZk, topic, partitions)
	EnsureHasLeader(localZk, topic)

	loggerConfig := NewKafkaLogEmitterConfig()
	loggerConfig.SchemaRegistryUrl = schemaRepositoryUrl
	loggerConfig.Topic = topic
	loggerConfig.Source = "go_kafka_client.log.emitter"
	loggerConfig.Tags = map[string]string{"origin": topic}
	loggerConfig.ProducerConfig = producer.NewProducerConfig()
	loggerConfig.ConnectorConfig = siesta.NewConnectorConfig()
	loggerConfig.ConnectorConfig.BrokerList = []string{localBroker}

	logger, err := NewKafkaLogEmitter(loggerConfig)
	assert(t, err, nil)
	// The single message the consumer below is expected to see.
	logger.Info("Message sent at %d", time.Now().Unix())

	consumeMessages := 1
	consumeStatus := make(chan int)
	delayTimeout := 10 * time.Second

	config := testConsumerConfig()
	config.Strategy = newCountingStrategy(t, consumeMessages, consumeTimeout, consumeStatus)
	consumer := NewConsumer(config)
	go consumer.StartStatic(map[string]int{topic: 1})

	if actual := <-consumeStatus; actual != consumeMessages {
		t.Errorf("Failed to consume %d messages within %s. Actual messages = %d", consumeMessages, consumeTimeout, actual)
	}

	closeWithin(t, delayTimeout, consumer)
}
// Esempio n. 5 (Example no. 5)
// 0
// testConnector returns a DefaultConnector wired to the local test broker at
// localhost:9092, failing the test immediately if construction fails.
func testConnector(t *testing.T) *siesta.DefaultConnector {
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = []string{"localhost:9092"}

	c, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		t.Fatal(err)
	}
	return c
}
// Esempio n. 6 (Example no. 6)
// 0
// DefaultProducer constructs a KafkaProducer for the given brokers that uses
// byte serializers for both keys and values, a batch size of 200 and the
// client id "zipkin". Returns an error if the connector cannot be created.
func DefaultProducer(brokerList []string) (*producer.KafkaProducer, error) {
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = brokerList

	connector, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		return nil, err
	}

	producerConfig := producer.NewProducerConfig()
	producerConfig.BatchSize = 200
	producerConfig.ClientID = "zipkin"

	return producer.NewKafkaProducer(producerConfig, producer.ByteSerializer, producer.ByteSerializer, connector), nil
}
// Esempio n. 7 (Example no. 7)
// 0
// cmdBenchmarkSet runs a produce benchmark against globalTopic: it builds a
// producer (Linger of one millisecond) over the comma-separated
// globalBrokerList and drives it with a BenchmarkTester at
// globalConcurrentLevel for globalDuration, sending a fixed message of
// globalMsgLength bytes on every iteration. Returns a validation error when
// the broker list or topic is unset, or the traced benchmark error.
func cmdBenchmarkSet() error {

	if len(globalBrokerList) == 0 {
		return errors.NotValidf("broker list")
	}
	if len(globalTopic) == 0 {
		return errors.NotValidf("Topic")
	}

	// One payload reused by every benchmark iteration.
	sendString := utils.GenTestMessage(globalMsgLength)
	producerConfig := siesta_producer.NewProducerConfig()
	producerConfig.Linger = time.Millisecond
	connConfig := siesta.NewConnectorConfig()
	brokerList := strings.Split(globalBrokerList, ",")
	producerConfig.BrokerList = brokerList
	connConfig.BrokerList = brokerList

	log.Printf("%v", brokerList)
	connector, err := siesta.NewDefaultConnector(connConfig)
	if err != nil {
		return errors.Trace(err)
	}
	// Periodic metadata refresh, intentionally disabled.
	//	go func() {
	//		timeout := time.Tick(producerConfig.MetadataExpire / 2)
	//		for {
	//			<-timeout
	//			connector.RefreshMetadata([]string{globalTopic})
	//		}
	//	}()

	producer := siesta_producer.NewKafkaProducer(producerConfig,
		siesta_producer.ByteSerializer,
		siesta_producer.ByteSerializer,
		connector)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {

			record := &siesta_producer.ProducerRecord{
				Topic: globalTopic,
				Value: []byte(sendString),
			}

			// Send is asynchronous; block until the broker acknowledges.
			recordMetadata := <-producer.Send(record)
			if recordMetadata.Error == siesta.ErrNoError {
				return nil
			}
			return recordMetadata.Error
		}, nil)
	return errors.Trace(bt.Run())
}
// Esempio n. 8 (Example no. 8)
// 0
// main connects to a local Kafka broker on localhost:9092 and consumes
// partitions 0 and 1 of the "gonzo" topic with consumerStrategy, blocking
// until the consumer finishes. Panics if the connector cannot be created.
func main() {
	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = []string{"localhost:9092"}

	connector, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		panic(err)
	}

	c := gonzo.NewConsumer(connector, gonzo.NewConsumerConfig(), consumerStrategy)
	c.Add("gonzo", 0)
	c.Add("gonzo", 1)

	c.Join()
}
// Esempio n. 9 (Example no. 9)
// 0
// startProducers builds one shared connector for the comma-separated broker
// list and starts this.config.NumProducers Kafka producers, each with its own
// produce goroutine. Panics if the connector cannot be created (startup-time
// failure).
func (this *SyslogProducer) startProducers() {
	brokerList := strings.Split(this.config.BrokerList, ",")

	connectorConfig := siesta.NewConnectorConfig()
	connectorConfig.BrokerList = brokerList
	connector, err := siesta.NewDefaultConnector(connectorConfig)
	if err != nil {
		panic(err)
	}

	for i := 0; i < this.config.NumProducers; i++ {
		glog.V(2).Infof("Starting new producer with config: %#v", this.config.ProducerConfig)
		// Named p (not "producer") so the producer package is not shadowed
		// for the remainder of the loop body.
		p := producer.NewKafkaProducer(this.config.ProducerConfig, producer.ByteSerializer, this.config.ValueSerializer, connector)
		this.producers = append(this.producers, p)
		go this.produceRoutine(p)
	}
}
// Esempio n. 10 (Example no. 10)
// 0
// cmdBenchmarkSetNoAck runs a fire-and-forget produce benchmark against
// globalTopic: the producer is configured with RequiredAcks = 0 and client id
// "Benchmark", and a BenchmarkTester drives it at globalConcurrentLevel for
// globalDuration, sending a fixed message of globalMsgLength bytes per
// iteration. Returns a validation error when the broker list or topic is
// unset, or the traced benchmark error.
func cmdBenchmarkSetNoAck() error {
	if len(globalBrokerList) == 0 {
		return errors.NotValidf("broker list")
	}
	if len(globalTopic) == 0 {
		return errors.NotValidf("Topic")
	}

	// One payload reused by every benchmark iteration.
	payload := utils.GenTestMessage(globalMsgLength)
	brokers := strings.Split(globalBrokerList, ",")

	producerConfig := siesta_producer.NewProducerConfig()
	producerConfig.ClientID = "Benchmark"
	producerConfig.RequiredAcks = 0
	producerConfig.BrokerList = brokers

	connConfig := siesta.NewConnectorConfig()
	connConfig.BrokerList = brokers

	log.Printf("%v", brokers)
	connector, err := siesta.NewDefaultConnector(connConfig)
	if err != nil {
		return errors.Trace(err)
	}

	kafkaProducer := siesta_producer.NewKafkaProducer(producerConfig,
		siesta_producer.ByteSerializer,
		siesta_producer.ByteSerializer,
		connector)

	bt := utils.NewBenchmarkTester(globalConcurrentLevel, globalDuration,
		func(bt *utils.BenchmarkTester, index int) error {
			record := &siesta_producer.ProducerRecord{
				Topic: globalTopic,
				Value: []byte(payload),
			}

			// Send is asynchronous; block until the result arrives.
			meta := <-kafkaProducer.Send(record)
			if meta.Error != siesta.ErrNoError {
				return meta.Error
			}
			return nil
		}, nil)
	return errors.Trace(bt.Run())
}
// Esempio n. 11 (Example no. 11)
// 0
// Start creates a connector for kc.brokerList, builds a gonzo consumer that
// delivers via kc.messageCallback, and adds every (topic, partition) pair
// from kc.topics x kc.partitions. It returns kc.messages for the caller to
// read from.
// NOTE(review): kc.messages is returned but nothing here writes to it —
// presumably kc.messageCallback feeds it; confirm in the rest of the type.
func (kc *KafkaConsumer) Start() (<-chan *gonzo.MessageAndMetadata, error) {
	config := siesta.NewConnectorConfig()
	config.BrokerList = kc.brokerList

	client, err := siesta.NewDefaultConnector(config)
	if err != nil {
		return nil, err
	}

	consumerConfig := gonzo.NewConsumerConfig()
	kc.consumer = gonzo.NewConsumer(client, consumerConfig, kc.messageCallback)
	// Subscribe to every configured partition of every configured topic.
	for _, partition := range kc.partitions {
		for _, topic := range kc.topics {
			kc.consumer.Add(topic, partition)
		}
	}
	return kc.messages, nil
}
// This will be called right after connecting to ConsumerCoordinator so this client can initialize itself
// with bootstrap broker list for example. May return an error to signal this client is unable to work with given configuration.
// Initialize discovers bootstrap brokers through the coordinator and builds
// the siesta connector from this client's configuration. Returns an error if
// broker discovery or connector construction fails.
func (this *SiestaClient) Initialize() error {
	bootstrapBrokers, err := BootstrapBrokers(this.config.Coordinator)
	if err != nil {
		return err
	}

	cc := siesta.NewConnectorConfig()
	cc.BrokerList = bootstrapBrokers
	cc.ClientID = this.config.Clientid
	cc.FetchSize = this.config.FetchMessageMaxBytes
	// A single socket timeout setting governs connect, read and write.
	cc.ReadTimeout = this.config.SocketTimeout
	cc.WriteTimeout = this.config.SocketTimeout
	cc.ConnectTimeout = this.config.SocketTimeout

	connector, err := siesta.NewDefaultConnector(cc)
	if err != nil {
		return err
	}
	this.connector = connector

	return nil
}