Example 1
func parseAndValidateArgs() *kafka.MirrorMakerConfig {
	flag.Var(&consumerConfig, "consumer.config", "Path to consumer configuration file.")
	flag.Parse()
	runtime.GOMAXPROCS(*maxProcs)

	if (*whitelist != "" && *blacklist != "") || (*whitelist == "" && *blacklist == "") {
		fmt.Println("Exactly one of whitelist or blacklist is required.")
		os.Exit(1)
	}
	if *producerConfig == "" {
		fmt.Println("Producer config is required.")
		os.Exit(1)
	}
	if len(consumerConfig) == 0 {
		fmt.Println("At least one consumer config is required.")
		os.Exit(1)
	}
	if *queueSize < 0 {
		fmt.Println("Queue size should be equal or greater than 0")
		os.Exit(1)
	}
	if *timingsProducerConfig != "" && *schemaRegistryUrl == "" {
		fmt.Println("--schema.registry.url parameter is required when --timings is used")
		os.Exit(1)
	}

	config := kafka.NewMirrorMakerConfig()
	config.Blacklist = *blacklist
	config.Whitelist = *whitelist
	config.ChannelSize = *queueSize
	config.ConsumerConfigs = []string(consumerConfig)
	config.NumProducers = *numProducers
	config.NumStreams = *numStreams
	config.PreservePartitions = *preservePartitions
	config.PreserveOrder = *preserveOrder
	config.ProducerConfig = *producerConfig
	config.TopicPrefix = *prefix
	if *schemaRegistryUrl != "" {
		config.KeyEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistryUrl)
		config.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistryUrl)
		config.KeyDecoder = kafka.NewKafkaAvroDecoder(*schemaRegistryUrl)
		config.ValueDecoder = kafka.NewKafkaAvroDecoder(*schemaRegistryUrl)
	}
	config.TimingsProducerConfig = *timingsProducerConfig

	return config
}
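The flag variables dereferenced above are declared at package level in the original file. A minimal sketch of what those declarations presumably look like (names and defaults are assumptions; consumerConfig must implement flag.Value so it can be passed to flag.Var and accept repeated --consumer.config flags):

// Hypothetical package-level flag declarations assumed by parseAndValidateArgs.
type consumerConfigs []string

func (c *consumerConfigs) String() string { return strings.Join(*c, ",") }

func (c *consumerConfigs) Set(value string) error {
	*c = append(*c, value) // allow the flag to be passed more than once
	return nil
}

var (
	consumerConfig        consumerConfigs
	whitelist             = flag.String("whitelist", "", "Regex of topics to mirror.")
	blacklist             = flag.String("blacklist", "", "Regex of topics to exclude.")
	producerConfig        = flag.String("producer.config", "", "Path to producer configuration file.")
	queueSize             = flag.Int("queue.size", 10000, "Size of the internal message channel.")
	numProducers          = flag.Int("num.producers", 1, "Number of producer instances.")
	numStreams            = flag.Int("num.streams", 1, "Number of consumer streams.")
	preservePartitions    = flag.Bool("preserve.partitions", false, "Produce to the same partition the message was consumed from.")
	preserveOrder         = flag.Bool("preserve.order", false, "Preserve message order.")
	prefix                = flag.String("prefix", "", "Prefix for destination topic names.")
	maxProcs              = flag.Int("max.procs", runtime.NumCPU(), "Maximum number of CPUs to use.")
	schemaRegistryUrl     = flag.String("schema.registry.url", "", "Avro schema registry URL.")
	timingsProducerConfig = flag.String("timings.producer.config", "", "Path to the timings producer configuration file.")
)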
Example 2
func produceAvro() {
	config1 := kafka.DefaultProducerConfig()
	config1.BrokerList = strings.Split(*brokerList, ",")
	config1.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
	config1.AckSuccesses = true // needed so acknowledged messages are delivered on producer1.Successes() below

	producer1 := kafka.NewSaramaProducer(config1)

	config2 := kafka.DefaultProducerConfig()
	config2.BrokerList = strings.Split(*brokerList, ",")
	config2.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
	producer2 := kafka.NewSaramaProducer(config2)

	avroSchema, err := avro.ParseSchemaFile(*avroSchema)
	if err != nil {
		panic(err)
	}

	_, err = kafka.NewCachedSchemaRegistryClient(*schemaRegistry).Register(avroSchema.GetName()+"-value", avroSchema)
	if err != nil {
		panic(err)
	}

	decoder := kafka.NewKafkaAvroDecoder(*schemaRegistry)
	go func() {
		for message := range producer1.Successes() {
			rawRecord, err := decoder.Decode(message.Value.([]byte))
			if err != nil {
				panic(err)
			}
			record := rawRecord.(*avro.GenericRecord)
			timings := record.Get("timings").([]interface{})
			timings = append(timings, time.Now().UnixNano()/int64(time.Millisecond))
			record.Set("timings", timings)

			producer2.Input() <- &kafka.ProducerMessage{Topic: *topic2, Value: record}
		}
	}()

	for range time.Tick(1 * time.Second) {
		messagesSent := 0
		for messagesSent < *perSecond {
			record := avro.NewGenericRecord(avroSchema)
			record.Set("id", int64(0))
			record.Set("timings", []int64{time.Now().UnixNano() / int64(time.Millisecond)})
			record.Set("value", []byte{})

			message := &kafka.ProducerMessage{Topic: *topic1, Value: record}
			producer1.Input() <- message
			messagesSent++
		}
	}
}
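Because producer1 is created with AckSuccesses enabled, every acknowledged message is replayed on producer1.Successes(), which is what lets the goroutine above decode it, append a timestamp to its timings array, and forward it to topic2. For reference, a minimal sketch of decoding one such message by hand, reusing only the decoder API already shown (rawBytes stands in for a consumed message value and is assumed):

decoder := kafka.NewKafkaAvroDecoder(*schemaRegistry)
raw, err := decoder.Decode(rawBytes) // rawBytes []byte: a consumed message value (assumed)
if err != nil {
	panic(err)
}
record := raw.(*avro.GenericRecord)
fmt.Println(record.Get("id"), record.Get("timings"))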
Example 3
func main() {
	parseArgs()

	go_kafka_client.Logger = go_kafka_client.NewDefaultLogger(go_kafka_client.ErrorLevel)
	kafkaProducer = producer.NewKafkaProducer(writeTopic, []string{broker})

	//Coordinator settings
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = []string{zookeeper}

	//Actual consumer settings
	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.KeyDecoder = go_kafka_client.NewKafkaAvroDecoder(schemaRepo)
	consumerConfig.ValueDecoder = consumerConfig.KeyDecoder

	consumerConfig.Strategy = func(worker *go_kafka_client.Worker, message *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		time.Sleep(2 * time.Second)
		record, ok := message.DecodedValue.(*avro.GenericRecord)
		if !ok {
			panic("Not a *GenericError, but expected one")
		}

		fmt.Printf("golang > received %s\n", fmt.Sprintf("{\"counter\": %d, \"name\": \"%s\", \"uuid\": \"%s\"}", record.Get("counter"), record.Get("name"), record.Get("uuid")))
		modify(record)
		encoded, err := encoder.Encode(record)
		if err != nil {
			panic(err)
		}

		if err := kafkaProducer.SendBytesSync(encoded); err != nil {
			panic(err)
		}

		return go_kafka_client.NewSuccessfulResult(taskId)
	}

	consumerConfig.WorkerFailureCallback = func(_ *go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *go_kafka_client.Task, _ go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	kafkaConsumer = go_kafka_client.NewConsumer(consumerConfig)

	pingPongLoop()
}
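parseArgs, modify, encoder, and pingPongLoop are defined elsewhere in the original program. Given the ping-pong pattern (receive, modify, re-encode, send back), a plausible sketch of modify is simply bumping the counter field; this is a guess, not the original code:

// Hypothetical: increment the record's counter before echoing it back.
func modify(record *avro.GenericRecord) {
	counter := record.Get("counter").(int64)
	record.Set("counter", counter+1)
}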
Example 4
func (this *TransformExecutor) startProducer() {
	producerConfig, err := kafka.ProducerConfigFromFile(this.config.ProducerConfig)
	if err != nil {
		panic(err)
	}

	cfgMap := make(map[string]string)
	err = cfg.Load(this.config.ProducerConfig, cfgMap)
	if err != nil {
		panic(err)
	}

	this.avroDecoder = kafka.NewKafkaAvroDecoder(cfgMap["schema.registry.url"])

	producerConfig.KeyEncoder = kafka.NewKafkaAvroEncoder(cfgMap["schema.registry.url"])
	producerConfig.ValueEncoder = producerConfig.KeyEncoder
	producerConfig.SendBufferSize = 10000
	producerConfig.BatchSize = 2000
	producerConfig.MaxMessagesPerRequest = 5000

	this.producer = kafka.NewSaramaProducer(producerConfig)
	go this.produceRoutine()
}
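The same file is read twice: kafka.ProducerConfigFromFile builds the producer settings, while cfg.Load dumps all keys into a plain map so the schema registry URL can be pulled out. The only key this function reads from the map is schema.registry.url, so the file is assumed to contain a properties-style line like the following (value hypothetical):

schema.registry.url=http://localhost:8081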
Example 5
func (this *EventFetcher) createConsumer() (*kafka.Consumer, error) {
	fmt.Println(this.config.ZkConnect)
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				EventName:     record.Get("eventname").(string),
				Second:        record.Get("second").(int64),
				Framework:     record.Get("framework").(string),
				Latency:       record.Get("latency").(int64),
				ReceivedCount: record.Get("received_count").(int64),
				SentCount:     record.Get("sent_count").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewSlaveConsumer(consumerConfig)
}
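The Event struct is defined elsewhere; from the field names and type assertions in the strategy it presumably looks like the following sketch:

type Event struct {
	EventName     string
	Second        int64
	Framework     string
	Latency       int64
	ReceivedCount int64
	SentCount     int64
}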
Example 6
func (this *EventFetcher) createConsumer() *kafka.Consumer {
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				Topic:      record.Get("topic").(string),
				ConsumerId: record.Get("consumerid").(string),
				Partition:  record.Get("partition").(string),
				EventName:  record.Get("eventname").(string),
				Second:     record.Get("second").(int64),
				Operation:  record.Get("operation").(string),
				Value:      record.Get("value").(int64),
				Cnt:        record.Get("cnt").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewConsumer(consumerConfig)
}
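Here too the Event struct is defined elsewhere; inferred from the type assertions, this variant carries per-partition consumer metrics:

type Event struct {
	Topic      string
	ConsumerId string
	Partition  string
	EventName  string
	Second     int64
	Operation  string
	Value      int64
	Cnt        int64
}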
Example 7
func setupAvroConfig(config *kafka.ConsumerConfig) {
	config.ValueDecoder = kafka.NewKafkaAvroDecoder(*schemaRegistry)
	config.Strategy = avroStrategy
}
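avroStrategy is defined elsewhere; a hypothetical minimal version, following the strategy signature used in the earlier examples, would assert the decoded value and report success or failure:

func avroStrategy(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
	if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
		fmt.Printf("received: %v\n", record)
		return kafka.NewSuccessfulResult(taskId)
	}
	return kafka.NewProcessingFailedResult(taskId)
}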