Example #1
func logLineProtoStrategy(_ *kafka.Worker, msg *kafka.Message, id kafka.TaskId) kafka.WorkerResult {
	// Decode the protobuf log line and forward its timings downstream.
	line := &sp.LogLine{}
	if err := proto.Unmarshal(msg.Value, line); err != nil {
		return kafka.NewProcessingFailedResult(id)
	}
	timings <- line.Timings

	return kafka.NewSuccessfulResult(id)
}
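A strategy only takes effect once it is assigned to a consumer config. A minimal wiring sketch in the config style of the later examples; the group id, topic, and stream count are placeholders, and a full config would also set the coordinator and decoders as in Example #5:

// Sketch: registering the strategy above on a consumer (ids are placeholders).
consumerConfig := kafka.DefaultConsumerConfig()
consumerConfig.Groupid = "log-timings"
consumerConfig.Strategy = logLineProtoStrategy
consumer := kafka.NewConsumer(consumerConfig)
go consumer.StartStatic(map[string]int{"logs": 1}) // topic -> number of streams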
Example #2
func GetStrategy(consumerId string) func(*kafkaClient.Worker, *kafkaClient.Message, kafkaClient.TaskId) kafkaClient.WorkerResult {
	consumeRate := metrics.NewRegisteredMeter(fmt.Sprintf("%s-ConsumeRate", consumerId), metrics.DefaultRegistry)
	return func(_ *kafkaClient.Worker, msg *kafkaClient.Message, id kafkaClient.TaskId) kafkaClient.WorkerResult {
		kafkaClient.Infof("main", "Got a message: %s", string(msg.Value))
		consumeRate.Mark(1)

		return kafkaClient.NewSuccessfulResult(id)
	}
}
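The meter registered above can be read back out of the registry for reporting. A small sketch, assuming metrics here is the rcrowley/go-metrics package (which the NewRegisteredMeter signature suggests); the consumer id and interval are placeholders:

// Sketch: periodically report the consume rate registered in GetStrategy
// (assumes github.com/rcrowley/go-metrics; "consumer-1" is a placeholder id).
go func() {
	for range time.Tick(10 * time.Second) {
		meter := metrics.GetOrRegisterMeter("consumer-1-ConsumeRate", metrics.DefaultRegistry)
		fmt.Printf("consumed: count=%d rate(1m)=%.2f/s\n", meter.Count(), meter.Rate1())
	}
}()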
Example #3
func sendAndConsumeGroupsRoutine(t *testing.T, quit chan int) {
	testTopic := fmt.Sprintf("test-groups-%d", time.Now().Unix())
	testGroupId := fmt.Sprintf("test-group1-%d", time.Now().Unix())
	testGroupId2 := fmt.Sprintf("test-group2-%d", time.Now().Unix())
	testMessage := fmt.Sprintf("test-message-%d", time.Now().Unix())

	fmt.Println("Starting sample broker testing")
	go_kafka_client.CreateMultiplePartitionsTopic(zookeepers[0], testTopic, 1)
	go_kafka_client.EnsureHasLeader(zookeepers[0], testTopic)
	kafkaProducer := producer.NewKafkaProducer(testTopic, brokers)
	fmt.Printf("Sending message %s to topic %s\n", testMessage, testTopic)
	err := kafkaProducer.SendStringSync(testMessage)
	if err != nil {
		// Signal both waiters first: t.Fatalf stops this goroutine immediately.
		quit <- 1
		quit <- 1
		t.Fatalf("Failed to produce message: %v", err)
	}

	var messageCount int64 // incremented from both consumers' worker goroutines
	readFunc := func(consumerId int) go_kafka_client.WorkerStrategy {
		return func(worker *go_kafka_client.Worker, msg *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
			message := string(msg.Value)
			if message != testMessage {
				t.Errorf("Produced value %s and consumed value %s do not match.", testMessage, message)
			} else {
				fmt.Printf("Consumer %d successfully consumed a message %s\n", consumerId, message)
			}
			atomic.AddInt64(&messageCount, 1)
			quit <- 1

			return go_kafka_client.NewSuccessfulResult(taskId)
		}
	}

	consumer1 := createConsumer(testGroupId, readFunc(1))
	fmt.Printf("Trying to consume the message with Consumer 1 and group %s\n", testGroupId)
	go consumer1.StartStatic(map[string]int{
		testTopic: 1,
	})

	consumer2 := createConsumer(testGroupId2, readFunc(2))
	fmt.Printf("Trying to consume the message with Consumer 2 and group %s\n", testGroupId2)
	go consumer2.StartStatic(map[string]int{
		testTopic: 1,
	})
	time.Sleep(timeout)
	if atomic.LoadInt64(&messageCount) != 2 {
		t.Errorf("Failed to produce and consume a value within %s", timeout)
	}

	<-consumer1.Close()
	<-consumer2.Close()

	quit <- 1
	quit <- 1
}
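createConsumer is a shared test helper that these routines rely on but which is not shown. A hypothetical sketch of it, assembled only from config calls that appear in Example #5; zookeepers is the package-level ZooKeeper list the tests already use:

// Hypothetical sketch of the createConsumer helper shared by these test routines.
func createConsumer(group string, strategy go_kafka_client.WorkerStrategy) *go_kafka_client.Consumer {
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = zookeepers

	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.Strategy = strategy

	return go_kafka_client.NewConsumer(consumerConfig)
}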
Example #4
func avroStrategy(_ *kafka.Worker, msg *kafka.Message, id kafka.TaskId) kafka.WorkerResult {
	record := msg.DecodedValue.(*avro.GenericRecord)

	newTimings := make([]int64, 0)
	for _, timing := range record.Get("timings").([]interface{}) {
		newTimings = append(newTimings, timing.(int64))
	}
	timings <- newTimings

	return kafka.NewSuccessfulResult(id)
}
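msg.DecodedValue is only a *avro.GenericRecord when the consumer was configured with an Avro value decoder. A sketch of that prerequisite, following the decoder wiring used in the later examples; the schema-registry URL is a placeholder:

// Sketch: the decoder config this strategy depends on
// ("http://localhost:8081" is a placeholder schema-registry URL).
consumerConfig := kafka.DefaultConsumerConfig()
consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder("http://localhost:8081")
consumerConfig.Strategy = avroStrategy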
Example #5
func main() {
	parseArgs()

	go_kafka_client.Logger = go_kafka_client.NewDefaultLogger(go_kafka_client.ErrorLevel)
	kafkaProducer = producer.NewKafkaProducer(writeTopic, []string{broker})

	//Coordinator settings
	zookeeperConfig := go_kafka_client.NewZookeeperConfig()
	zookeeperConfig.ZookeeperConnect = []string{zookeeper}

	//Actual consumer settings
	consumerConfig := go_kafka_client.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = go_kafka_client.SmallestOffset
	consumerConfig.Coordinator = go_kafka_client.NewZookeeperCoordinator(zookeeperConfig)
	consumerConfig.Groupid = group
	consumerConfig.NumWorkers = 1
	consumerConfig.NumConsumerFetchers = 1
	consumerConfig.KeyDecoder = go_kafka_client.NewKafkaAvroDecoder(schemaRepo)
	consumerConfig.ValueDecoder = consumerConfig.KeyDecoder

	consumerConfig.Strategy = func(worker *go_kafka_client.Worker, message *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		time.Sleep(2 * time.Second)
		record, ok := message.DecodedValue.(*avro.GenericRecord)
		if !ok {
			panic("Not a *GenericError, but expected one")
		}

		fmt.Printf("golang > received %s\n", fmt.Sprintf("{\"counter\": %d, \"name\": \"%s\", \"uuid\": \"%s\"}", record.Get("counter"), record.Get("name"), record.Get("uuid")))
		modify(record)
		encoded, err := encoder.Encode(record)
		if err != nil {
			panic(err)
		}

		if err := kafkaProducer.SendBytesSync(encoded); err != nil {
			panic(err)
		}

		return go_kafka_client.NewSuccessfulResult(taskId)
	}

	consumerConfig.WorkerFailureCallback = func(_ *go_kafka_client.WorkerManager) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *go_kafka_client.Task, _ go_kafka_client.WorkerResult) go_kafka_client.FailedDecision {
		return go_kafka_client.CommitOffsetAndContinue
	}

	kafkaConsumer = go_kafka_client.NewConsumer(consumerConfig)

	pingPongLoop()
}
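modify and encoder are package-level helpers that are not shown here. A hypothetical sketch of modify, using only the record Get/Set calls seen elsewhere in these examples; that it bumps the counter field is an assumption about the ping-pong exchange:

// Hypothetical sketch of the modify helper assumed by main above:
// bump the record's "counter" field before echoing it back.
func modify(record *avro.GenericRecord) {
	counter := record.Get("counter").(int64)
	record.Set("counter", counter+1)
}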
Example #6
func avroStrategy(_ *kafka.Worker, msg *kafka.Message, id kafka.TaskId) kafka.WorkerResult {
	record := msg.DecodedValue.(*avro.GenericRecord)

	// Merge the timings carried in the decoded key into the record's
	// "timings" array, then append our own receive time in milliseconds.
	messageTimings := record.Get("timings").([]interface{})
	for _, timing := range msg.DecodedKey.([]int64) {
		messageTimings = append(messageTimings, timing)
	}
	messageTimings = append(messageTimings, time.Now().UnixNano()/int64(time.Millisecond))
	record.Set("timings", messageTimings)

	producer.Input() <- &kafka.ProducerMessage{Topic: *produceTopic, Value: record}

	return kafka.NewSuccessfulResult(id)
}
Example #7
func logLineProtoStrategy(_ *kafka.Worker, msg *kafka.Message, id kafka.TaskId) kafka.WorkerResult {
	line := &sp.LogLine{}
	if err := proto.Unmarshal(msg.Value, line); err != nil {
		panic(err)
	}
	line.Timings = append(line.Timings, msg.DecodedKey.([]int64)...)
	line.Timings = append(line.Timings, time.Now().UnixNano()/int64(time.Millisecond))

	bytes, err := proto.Marshal(line)
	if err != nil {
		panic(err)
	}

	producer.Input() <- &kafka.ProducerMessage{Topic: *produceTopic, Value: bytes}

	return kafka.NewSuccessfulResult(id)
}
Example #8
func sendAndConsumeRoutine(t *testing.T, quit chan int) {
	testTopic := fmt.Sprintf("test-simple-%d", time.Now().Unix())
	testGroupId := fmt.Sprintf("group-%d", time.Now().Unix())
	testMessage := fmt.Sprintf("test-message-%d", time.Now().Unix())

	fmt.Println("Starting sample broker testing")
	go_kafka_client.CreateMultiplePartitionsTopic(zookeepers[0], testTopic, 1)
	go_kafka_client.EnsureHasLeader(zookeepers[0], testTopic)
	kafkaProducer := producer.NewKafkaProducer(testTopic, brokers)
	fmt.Printf("Sending message %s to topic %s\n", testMessage, testTopic)
	err := kafkaProducer.SendStringSync(testMessage)
	if err != nil {
		quit <- 1
		t.Fatalf("Failed to produce message: %v", err)
	}
	kafkaProducer.Close()

	var messageConsumed int32 // set from the worker goroutine, read after the timeout
	kafkaConsumer := createConsumer(testGroupId, func(worker *go_kafka_client.Worker, msg *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		message := string(msg.Value)
		if message != testMessage {
			t.Errorf("Produced value %s and consumed value %s do not match.", testMessage, message)
		} else {
			fmt.Printf("Consumer %d successfully consumed a message %s\n", 1, message)
		}
		atomic.StoreInt32(&messageConsumed, 1)
		quit <- 1

		return go_kafka_client.NewSuccessfulResult(taskId)
	})

	fmt.Printf("Trying to consume the message with group %s\n", testGroupId)
	go kafkaConsumer.StartStatic(map[string]int{
		testTopic: 1,
	})

	time.Sleep(timeout)
	if atomic.LoadInt32(&messageConsumed) == 0 {
		t.Errorf("Failed to produce and consume a value within %s", timeout)
	}
	<-kafkaConsumer.Close()
	quit <- 1
}
Example #9
func consumerGroupsMultiplePartitionsRoutine(t *testing.T, quit chan int) {
	topic := fmt.Sprintf("multiple-partition-%d", time.Now().Unix())
	go_kafka_client.CreateMultiplePartitionsTopic(zookeepers[0], topic, 2)
	go_kafka_client.EnsureHasLeader(zookeepers[0], topic)

	kafkaProducer := producer.NewKafkaProducer(topic, brokers)
	totalMessages := 100
	fmt.Printf("Sending %d messages to topic %s\n", totalMessages, topic)
	go func() {
		for i := 0; i < totalMessages; i++ {
			kafkaProducer.SendStringSync(fmt.Sprintf("partitioned %d", i))
		}
	}()

	fmt.Println("consume these messages with a consumer group")
	var messageCount int64 // incremented from the worker goroutine, read after the timeout
	consumer1 := createConsumer("group1", func(worker *go_kafka_client.Worker, msg *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		fmt.Printf("Consumer group consumed message %s\n", string(msg.Value))
		atomic.AddInt64(&messageCount, 1)
		return go_kafka_client.NewSuccessfulResult(taskId)
	})
	go consumer1.StartStatic(map[string]int{
		topic: 1,
	})

	<-time.After(timeout / 2)
	if consumed := atomic.LoadInt64(&messageCount); consumed != int64(totalMessages) {
		t.Errorf("Invalid number of messages: expected %d, actual %d", totalMessages, consumed)
	} else {
		fmt.Printf("Consumed %d messages\n", consumed)
	}

	//shutdown gracefully
	kafkaProducer.Close()
	<-consumer1.Close()
	quit <- 1
}
Example #10
func (this *EventFetcher) createConsumer() (*kafka.Consumer, error) {
	fmt.Println(this.config.ZkConnect)
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				EventName:     record.Get("eventname").(string),
				Second:        record.Get("second").(int64),
				Framework:     record.Get("framework").(string),
				Latency:       record.Get("latency").(int64),
				ReceivedCount: record.Get("received_count").(int64),
				SentCount:     record.Get("sent_count").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewSlaveConsumer(consumerConfig)
}
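The Event struct and the this.events channel are defined elsewhere in the fetcher. The field assignments above imply a shape roughly like this hypothetical sketch:

// Hypothetical Event shape implied by the assignments in createConsumer.
type Event struct {
	EventName     string
	Second        int64
	Framework     string
	Latency       int64
	ReceivedCount int64
	SentCount     int64
}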
Example #11
func (this *EventFetcher) createConsumer() *kafka.Consumer {
	coordinatorConfig := kafka.NewZookeeperConfig()
	coordinatorConfig.ZookeeperConnect = []string{this.config.ZkConnect}
	coordinator := kafka.NewZookeeperCoordinator(coordinatorConfig)
	consumerConfig := kafka.DefaultConsumerConfig()
	consumerConfig.AutoOffsetReset = kafka.LargestOffset
	consumerConfig.Coordinator = coordinator
	consumerConfig.Groupid = "event-dashboard"
	consumerConfig.ValueDecoder = kafka.NewKafkaAvroDecoder(this.config.SchemaRegistryUrl)
	consumerConfig.WorkerFailureCallback = func(_ *kafka.WorkerManager) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.WorkerFailedAttemptCallback = func(_ *kafka.Task, _ kafka.WorkerResult) kafka.FailedDecision {
		return kafka.CommitOffsetAndContinue
	}
	consumerConfig.Strategy = func(_ *kafka.Worker, msg *kafka.Message, taskId kafka.TaskId) kafka.WorkerResult {
		if record, ok := msg.DecodedValue.(*avro.GenericRecord); ok {
			this.events <- &Event{
				Topic:      record.Get("topic").(string),
				ConsumerId: record.Get("consumerid").(string),
				Partition:  record.Get("partition").(string),
				EventName:  record.Get("eventname").(string),
				Second:     record.Get("second").(int64),
				Operation:  record.Get("operation").(string),
				Value:      record.Get("value").(int64),
				Cnt:        record.Get("cnt").(int64),
			}
		} else {
			return kafka.NewProcessingFailedResult(taskId)
		}

		return kafka.NewSuccessfulResult(taskId)
	}

	return kafka.NewConsumer(consumerConfig)
}
Example #12
func handleMessage(consumerId string, channel chan *api.Message) func(*kafkaClient.Worker, *kafkaClient.Message, kafkaClient.TaskId) kafkaClient.WorkerResult {
	return func(_ *kafkaClient.Worker, msg *kafkaClient.Message, id kafkaClient.TaskId) kafkaClient.WorkerResult {
		channel <- api.NewTextMessage(msg.Value)
		return kafkaClient.NewSuccessfulResult(id)
	}
}
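Note that consumerId is not used by the returned handler. A wiring sketch under the same assumptions as the earlier examples; the channel buffer size, consumer id, and topic are placeholders:

// Sketch: hooking the handler factory into a consumer (names are placeholders).
messages := make(chan *api.Message, 100)
consumerConfig := kafkaClient.DefaultConsumerConfig()
consumerConfig.Strategy = handleMessage("consumer-1", messages)
consumer := kafkaClient.NewConsumer(consumerConfig)
go consumer.StartStatic(map[string]int{"chat": 1})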
Example #13
func consumerGroupsSinglePartitionRoutine(t *testing.T, quit chan int) {
	topic := fmt.Sprintf("single-partition-%d", time.Now().Unix())
	consumerGroup1 := fmt.Sprintf("single-partition-%d", time.Now().Unix())
	go_kafka_client.CreateMultiplePartitionsTopic(zookeepers[0], topic, 1)
	go_kafka_client.EnsureHasLeader(zookeepers[0], topic)

	//create a new producer and send 2 messages to a random topic
	kafkaProducer := producer.NewKafkaProducer(topic, brokers)
	fmt.Printf("Sending message 1 and 2 to topic %s\n", topic)
	kafkaProducer.SendStringSync("1")
	kafkaProducer.SendStringSync("2")

	//create a new consumer and try to consume the 2 produced messages
	waiter1 := make(chan int)
	messageCount1 := 0
	consumer1 := createConsumer(consumerGroup1, func(worker *go_kafka_client.Worker, msg *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		fmt.Printf("Consumed message %s\n", string(msg.Value))
		messageCount1++
		if messageCount1 == 2 {
			waiter1 <- 1
		}

		return go_kafka_client.NewSuccessfulResult(taskId)
	})
	fmt.Printf("Trying to consume messages with Consumer 1 and group %s\n", consumerGroup1)
	go consumer1.StartStatic(map[string]int{
		topic: 1,
	})

	//wait until the messages are consumed or time out after 10 seconds
	select {
	case <-waiter1:
		//wait a bit to commit offset
		time.Sleep(2 * time.Second)
	case <-time.After(timeout):
		t.Errorf("Failed to consume messages with Consumer 1 within %s", timeout)
	}
	<-consumer1.Close()

	//create one more consumer with the same consumer group and make sure messages are not consumed again
	waiter2 := make(chan int)
	consumer2 := createConsumer(consumerGroup1, func(worker *go_kafka_client.Worker, msg *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		t.Errorf("Consumer 2 consumed a previously consumed message %s\n", string(msg.Value))
		waiter2 <- 1
		return go_kafka_client.NewSuccessfulResult(taskId)
	})
	fmt.Printf("Trying to consume messages with Consumer 2 and group %s\n", consumerGroup1)
	go consumer2.StartStatic(map[string]int{
		topic: 1,
	})

	fmt.Println("wait to make sure messages are not consumed again")
	select {
	case <-waiter2:
	case <-time.After(5 * time.Second):
	}
	<-consumer2.Close()

	fmt.Println("produce 50 more messages")
	numMessages := 50
	for i := 0; i < numMessages; i++ {
		kafkaProducer.SendStringSync(fmt.Sprintf("message-%d", i))
	}

	fmt.Println("consume these messages with a consumer group")
	//total number of consumed messages should be 50 i.e. no duplicate or missing messages within one group
	var messageCount2 int64 // incremented from consumer3's worker goroutine
	consumer3 := createConsumer(consumerGroup1, func(worker *go_kafka_client.Worker, msg *go_kafka_client.Message, taskId go_kafka_client.TaskId) go_kafka_client.WorkerResult {
		fmt.Printf("Consumer 1 consumed message %s\n", string(msg.Value))
		messageCount2++
		return go_kafka_client.NewSuccessfulResult(taskId)
	})
	go consumer3.StartStatic(map[string]int{
		topic: 1,
	})

	<-time.After(timeout / 2)
	if consumed := atomic.LoadInt64(&messageCount2); consumed != int64(numMessages) {
		t.Errorf("Invalid number of messages: expected %d, actual %d", numMessages, consumed)
	} else {
		fmt.Printf("Consumed %d messages\n", consumed)
	}

	//shutdown gracefully; consumer1 and consumer2 were already closed above
	kafkaProducer.Close()
	<-consumer3.Close()

	quit <- 1
}