Example #1
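Load an Avro schema, build a record, encode it to a byte buffer, and decode it back in the same process.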
func main() {
	// Load the Avro schema definition from disk.
	err := avro.LoadSchema("./event.avsc")
	if err != nil {
		log.Fatalln("err load schema:", err)
	}
	// Create a new record based on the loaded schema.
	record, err := avro.NewRecord()
	if err != nil {
		log.Fatalln(err)
	}
	// Populate the record fields with sample values.
	record.Set("id", "www")
	record.Set("event", "")
	record.Set("timestamp", "www")
	record.Set("exchange", nil)
	fmt.Println(record)

	// Encode the record twice: buf is consumed by Decode below,
	// buf2 is only used to print the encoded bytes.
	buf := new(bytes.Buffer)
	buf2 := new(bytes.Buffer)
	if err = avro.Encode(buf, record); err != nil {
		log.Fatalln(err)
	}
	if err = avro.Encode(buf2, record); err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("%x\n", buf2.Bytes())

	// Decode the record back from the encoded bytes and print its fields.
	decode, err := avro.Decode(buf)
	if err != nil {
		log.Fatalln("err decode record:", err)
	}
	fmt.Println("Record Name:", decode.Name)
	fmt.Println("Record Fields:")
	for i, field := range decode.Fields {
		fmt.Println(" field", i, field.Name, ":", field.Datum)
	}
}
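For reference, a minimal event.avsc that would accept the values set above might look like the sketch below. The field names come from the example; the types are assumptions (the example passes strings for id, event and timestamp, and nil for exchange, so exchange is sketched as nullable), and the schema actually used may differ.

{
  "type": "record",
  "name": "event",
  "doc": "Hypothetical sketch; field types inferred from the example code.",
  "fields": [
    {"name": "id", "type": "string"},
    {"name": "event", "type": "string"},
    {"name": "timestamp", "type": "string"},
    {"name": "exchange", "type": ["null", "string"], "default": null}
  ]
}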
Example #2
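Encode a record against TrackerEvent.avsc and publish the resulting bytes to a Kafka topic with a sarama SyncProducer.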
func main() {
	// Load the Avro schema definition from disk.
	err := avro.LoadSchema("./TrackerEvent.avsc")
	if err != nil {
		log.Fatalln("err load schema:", err)
	}
	// Build a record and fill it with sample values; timestamp is a Unix epoch.
	record, err := avro.NewRecord()
	if err != nil {
		log.Fatalln(err)
	}
	record.Set("device_id", "device_id is here")
	record.Set("app_id", "app_id is here")
	record.Set("ip", "ip is here")
	record.Set("timestamp", time.Now().UTC().Unix())
	log.Println(record)

	// Encode the record into an Avro binary payload.
	buf := new(bytes.Buffer)
	if err = avro.Encode(buf, record); err != nil {
		log.Fatalln(err)
	}

	// Configure a synchronous Kafka producer. topic, partition and
	// brokerList are assumed to be defined elsewhere (e.g. as flags).
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Producer.Return.Successes = true // required by sarama's SyncProducer
	config.Producer.Partitioner = sarama.NewHashPartitioner
	// Note: with a hash partitioner the Partition set here is ignored;
	// it is only honoured by sarama.NewManualPartitioner.
	message := &sarama.ProducerMessage{Topic: topic, Partition: int32(partition)}
	message.Value = sarama.ByteEncoder(buf.Bytes())
	producer, err := sarama.NewSyncProducer(strings.Split(brokerList, ","), config)
	if err != nil {
		log.Fatalln("err new producer:", err)
	}
	defer func() {
		if err := producer.Close(); err != nil {
			log.Println("Failed to close kafka producer:", err)
		}
	}()

	// Send the encoded record and report where it landed.
	partition, offset, err := producer.SendMessage(message)
	if err != nil {
		log.Fatalln("err send message:", err)
	}
	log.Printf("topic=%s\tpartition=%d\toffset=%d\n", topic, partition, offset)
}
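Similarly, a TrackerEvent.avsc matching the fields set in this example could look like the following sketch. The types are assumptions: device_id, app_id and ip are taken to be strings, and timestamp a long, because the example passes a Unix timestamp from time.Now().UTC().Unix(). This is an illustration, not the schema actually used.

{
  "type": "record",
  "name": "TrackerEvent",
  "doc": "Hypothetical sketch; field types inferred from the example code.",
  "fields": [
    {"name": "device_id", "type": "string"},
    {"name": "app_id", "type": "string"},
    {"name": "ip", "type": "string"},
    {"name": "timestamp", "type": "long"}
  ]
}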
Example #3
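Consume Avro-encoded messages from every partition of a Kafka topic with sarama and decode each one back into a record.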
func main() {
	// topic, offset, brokerList, bufferSize and buffer are assumed to be
	// defined elsewhere in the package (e.g. as flags and a *bytes.Buffer).
	err := avro.LoadSchema("./event.avsc")
	if err != nil {
		log.Fatalln("err load schema:", err)
	}
	// Connect a consumer and list all partitions of the topic.
	consumer, err := sarama.NewConsumer(strings.Split(brokerList, ","), nil)
	if err != nil {
		log.Fatalln("err new consumer:", err)
	}
	partitionList, err := consumer.Partitions(topic)
	if err != nil {
		log.Fatalln("err get partition list:", err)
	}
	var (
		messages = make(chan *sarama.ConsumerMessage, bufferSize)
		closing  = make(chan struct{})
		wg       sync.WaitGroup
	)
	// Shut the partition consumers down on interrupt. Only os.Interrupt is
	// registered: SIGKILL cannot be trapped, so notifying on os.Kill has no effect.
	go func() {
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		log.Println("shutting down consumers")
		close(closing)
	}()

	// Start one goroutine per partition; they all funnel their messages
	// into the shared messages channel.
	for _, partition := range partitionList {
		pc, err := consumer.ConsumePartition(topic, partition, offset)
		if err != nil {
			log.Fatalln("err start to consume partition:", partition, ":", err)
		}
		go func(pc sarama.PartitionConsumer) {
			<-closing
			pc.AsyncClose()
		}(pc)
		wg.Add(1)
		go func(pc sarama.PartitionConsumer) {
			defer wg.Done()
			for message := range pc.Messages() {
				messages <- message
			}
		}(pc)
	}

	// Decode each consumed message back into an Avro record. The done channel
	// lets main wait until every buffered message has been decoded.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for msg := range messages {
			_, err := buffer.Write(msg.Value)
			if err != nil {
				log.Fatalln("err write to buffer:", err)
			}
			record, err := avro.Decode(buffer)
			if err != nil {
				log.Fatalln("err decode buffer:", err)
			}
			log.Println(record)
		}
	}()

	// Wait for the partition consumers to drain, then clean up.
	wg.Wait()
	log.Println("Done consuming topic:", topic)
	close(messages)
	<-done
	if err := consumer.Close(); err != nil {
		log.Println("err closing consumer:", err)
	}
}