func produceAvro() {
	// producer1 sends newly generated Avro records and reports successful deliveries back
	// on its Successes() channel.
	config1 := kafka.DefaultProducerConfig()
	config1.BrokerList = strings.Split(*brokerList, ",")
	config1.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
	config1.AckSuccesses = true
	producer1 := kafka.NewSaramaProducer(config1)

	// producer2 republishes acknowledged records to the second topic.
	config2 := kafka.DefaultProducerConfig()
	config2.BrokerList = strings.Split(*brokerList, ",")
	config2.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
	producer2 := kafka.NewSaramaProducer(config2)

	// Parse the Avro schema and register it with the schema registry under "<schema name>-value".
	avroSchema, err := avro.ParseSchemaFile(*avroSchema)
	if err != nil {
		panic(err)
	}

	_, err = kafka.NewCachedSchemaRegistryClient(*schemaRegistry).Register(avroSchema.GetName()+"-value", avroSchema)
	if err != nil {
		panic(err)
	}

	// For every successfully delivered message, decode it, append the current timestamp to
	// its "timings" field and republish it to topic2.
	decoder := kafka.NewKafkaAvroDecoder(*schemaRegistry)
	go func() {
		for message := range producer1.Successes() {
			rawRecord, err := decoder.Decode(message.Value.([]byte))
			if err != nil {
				panic(err)
			}
			record := rawRecord.(*avro.GenericRecord)
			timings := record.Get("timings").([]interface{})
			timings = append(timings, time.Now().UnixNano()/int64(time.Millisecond))
			record.Set("timings", timings)

			producer2.Input() <- &kafka.ProducerMessage{Topic: *topic2, Value: record}
		}
	}()

	// Every second, produce *perSecond fresh records to topic1, each stamped with the time
	// it was created.
	for range time.Tick(1 * time.Second) {
		messagesSent := 0
		for messagesSent < *perSecond {
			record := avro.NewGenericRecord(avroSchema)
			record.Set("id", int64(0))
			record.Set("timings", []int64{time.Now().UnixNano() / int64(time.Millisecond)})
			record.Set("value", []byte{})

			message := &kafka.ProducerMessage{Topic: *topic1, Value: record}
			producer1.Input() <- message
			messagesSent++
		}
	}
}
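// produceAvro relies on package-level command-line flags that are defined elsewhere in the
// program. A minimal sketch of what those declarations could look like, inferred from the
// usages above (flag names, defaults and help strings are assumptions, not the original values):
var (
	brokerList     = flag.String("broker.list", "localhost:9092", "Comma-separated list of Kafka brokers")
	schemaRegistry = flag.String("schema.registry", "http://localhost:8081", "Avro schema registry URL")
	avroSchema     = flag.String("avro.schema", "logline.avsc", "Path to the Avro schema file")
	topic1         = flag.String("topic1", "", "Topic to produce generated records to")
	topic2         = flag.String("topic2", "", "Topic to republish acknowledged records to")
	perSecond      = flag.Int("per.second", 100, "Messages to produce per second")
)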
func parseAndValidateArgs() *kafka.SyslogProducerConfig {
	tag = make(map[string]string)
	flag.Var(tag, "tag", "")
	flag.Parse()

	setLogLevel()
	runtime.GOMAXPROCS(*maxProcs)

	if *brokerList == "" {
		fmt.Println("broker.list is required.")
		os.Exit(1)
	}

	if *topic == "" {
		fmt.Println("Topic is required.")
		os.Exit(1)
	}

	if *queueSize < 0 {
		fmt.Println("Queue size must be greater than or equal to 0.")
		os.Exit(1)
	}

	config := kafka.NewSyslogProducerConfig()
	conf, err := kafka.ProducerConfigFromFile(*producerConfig)
	useFile := true
	if err != nil {
		// we don't have a producer configuration file, which is ok
		useFile = false
	} else {
		if err = conf.Validate(); err != nil {
			panic(err)
		}
	}

	// Use the configuration file when one was given and valid, otherwise fall back to
	// defaults overridden by the command-line flags.
	if useFile {
		config.ProducerConfig = conf
	} else {
		config.ProducerConfig = kafka.DefaultProducerConfig()
		config.ProducerConfig.Acks = *requiredAcks
		config.ProducerConfig.Timeout = time.Duration(*acksTimeout) * time.Millisecond
	}

	config.NumProducers = *numProducers
	config.ChannelSize = *queueSize
	config.Topic = *topic
	config.BrokerList = *brokerList
	config.TCPAddr = fmt.Sprintf("%s:%s", *tcpHost, *tcpPort)
	config.UDPAddr = fmt.Sprintf("%s:%s", *udpHost, *udpPort)

	// Only install the protobuf transformer when at least one enrichment option is set.
	if !(*source == "" && len(tag) == 0 && *logtypeid == math.MinInt64) {
		config.Transformer = protobufTransformer
	}

	return config
}
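// parseAndValidateArgs returns a fully populated SyslogProducerConfig; the main that consumes
// it is not shown here. A minimal sketch of such a main, assuming the library exposes a
// NewSyslogProducer constructor with Start/Stop methods:
func main() {
	config := parseAndValidateArgs()

	producer := kafka.NewSyslogProducer(config)
	go producer.Start()

	// Run until interrupted, then stop the producer.
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)
	<-ctrlc
	producer.Stop()
}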
func produceLogLineProtobuf() {
	// producer1 sends freshly created log lines and reports successful deliveries back on
	// its Successes() channel; producer2 republishes acknowledged lines to the second topic.
	config1 := kafka.DefaultProducerConfig()
	config1.BrokerList = strings.Split(*brokerList, ",")
	config1.AckSuccesses = true
	producer1 := kafka.NewSaramaProducer(config1)

	config2 := kafka.DefaultProducerConfig()
	config2.BrokerList = strings.Split(*brokerList, ",")
	producer2 := kafka.NewSaramaProducer(config2)

	// For every successfully delivered message, append the current timestamp to its Timings
	// field and republish it to topic2.
	go func() {
		for message := range producer1.Successes() {
			line := &sp.LogLine{}
			if err := proto.Unmarshal(message.Value.([]byte), line); err != nil {
				panic(err)
			}
			line.Timings = append(line.Timings, time.Now().UnixNano()/int64(time.Millisecond))
			bytes, err := proto.Marshal(line)
			if err != nil {
				panic(err)
			}

			producer2.Input() <- &kafka.ProducerMessage{Topic: *topic2, Value: bytes}
		}
	}()

	// Every second, produce *perSecond protobuf-encoded log lines to topic1, each stamped
	// with the time it was created.
	for range time.Tick(1 * time.Second) {
		messagesSent := 0
		for messagesSent < *perSecond {
			line := &sp.LogLine{}
			line.Line = proto.String("")
			line.Timings = []int64{time.Now().UnixNano() / int64(time.Millisecond)}
			bytes, err := proto.Marshal(line)
			if err != nil {
				panic(err)
			}

			message := &kafka.ProducerMessage{Topic: *topic1, Value: bytes}
			producer1.Input() <- message
			messagesSent++
		}
	}
}
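// sp.LogLine is a protobuf-generated type that is not shown here. Judging only from the
// fields used above, the relevant part of its generated Go struct would look roughly like
// the sketch below (field set and types are assumptions; the real message may define more
// fields, such as the source, tags and log type id referenced by the syslog producer):
type LogLine struct {
	Line    *string // the log line itself (proto2 optional string, hence the pointer)
	Timings []int64 // millisecond timestamps appended at each hop to measure latency
}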
func parseAndValidateArgs() *kafka.MarathonEventProducerConfig {
	flag.Parse()
	setLogLevel()
	runtime.GOMAXPROCS(*maxProcs)

	if *brokerList == "" {
		fmt.Println("broker.list is required.")
		os.Exit(1)
	}

	if *topic == "" {
		fmt.Println("Topic is required.")
		os.Exit(1)
	}

	config := kafka.NewMarathonEventProducerConfig()
	conf, err := kafka.ProducerConfigFromFile(*producerConfig)
	useFile := true
	if err != nil {
		// we don't have a producer configuration file, which is ok
		useFile = false
	} else {
		if err = conf.Validate(); err != nil {
			panic(err)
		}
	}

	// Use the configuration file when one was given and valid, otherwise fall back to
	// defaults overridden by the command-line flags.
	if useFile {
		config.ProducerConfig = conf
	} else {
		config.ProducerConfig = kafka.DefaultProducerConfig()
		config.ProducerConfig.Acks = *requiredAcks
		config.ProducerConfig.Timeout = time.Duration(*acksTimeout) * time.Millisecond
	}

	config.Topic = *topic
	config.BrokerList = *brokerList
	config.Port = *port
	config.Pattern = *pattern
	config.SchemaRegistryUrl = *registry

	// When a schema registry is configured, events are Avro-encoded, so the schema file
	// must parse successfully.
	if config.SchemaRegistryUrl != "" {
		schema, err := avro.ParseSchemaFile(*avroSchema)
		if err != nil {
			fmt.Printf("Could not parse schema file: %s\n", err)
			os.Exit(1)
		}
		config.AvroSchema = schema
	}

	return config
}
func main() {
	parseAndValidateArgs()

	// Trap Ctrl+C so the consumer and producer can be shut down cleanly.
	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)

	producerConfig := kafka.DefaultProducerConfig()
	producerConfig.BrokerList = strings.Split(*brokerList, ",")

	zkConfig := kafka.NewZookeeperConfig()
	zkConfig.ZookeeperConnect = strings.Split(*zookeeper, ",")
	coordinator := kafka.NewZookeeperCoordinator(zkConfig)

	config := kafka.DefaultConsumerConfig()
	config.Debug = true
	config.Groupid = "perf-mirror"
	config.AutoOffsetReset = "smallest"
	config.Coordinator = coordinator
	config.WorkerFailedAttemptCallback = FailedAttemptCallback
	config.WorkerFailureCallback = FailedCallback
	if *siesta {
		config.LowLevelClient = kafka.NewSiestaClient(config)
	}

	// Wire up either the protobuf or the Avro pipeline depending on the chosen mode.
	if protobuf {
		setupProtoConfig(config)
	} else {
		producerConfig.ValueEncoder = kafka.NewKafkaAvroEncoder(*schemaRegistry)
		setupAvroConfig(config)
	}

	producer = kafka.NewSaramaProducer(producerConfig)
	consumer := kafka.NewConsumer(config)

	go consumer.StartStatic(map[string]int{*consumeTopic: 1})

	<-ctrlc
	fmt.Println("Shutdown triggered, closing consumer")
	<-consumer.Close()
	producer.Close()
}