// mockNewBroker spins up a sarama mock broker preloaded with canned offset
// responses for the given chain partition, connects a real sarama.Broker to
// it, and wraps both in a mockBrockerImpl.
func mockNewBroker(t *testing.T, cp ChainPartition) (Broker, error) {
	mb := sarama.NewMockBroker(t, testBrokerID)
	// The sarama mock package doesn't allow us to return an error
	// for invalid offset requests, so we return an offset of -1.
	// Note that the mock offset responses below imply a broker with
	// newestOffset-1 blocks available. Therefore, if you are using this
	// broker as part of a bigger test where you intend to consume blocks,
	// make sure that the mockConsumer has been initialized accordingly
	// (Set the 'offset' parameter to newestOffset-1.)
	offsetResponse := sarama.NewMockOffsetResponse(t).
		SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetOldest, testOldestOffset).
		SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetNewest, testNewestOffset)
	handlers := map[string]sarama.MockResponse{
		"OffsetRequest": offsetResponse,
	}
	mb.SetHandlerByMap(handlers)

	// Open a real broker connection against the mock's listener.
	realBroker := sarama.NewBroker(mb.Addr())
	if err := realBroker.Open(nil); err != nil {
		return nil, fmt.Errorf("Cannot connect to mock broker: %s", err)
	}

	return &mockBrockerImpl{
		brokerImpl: brokerImpl{
			broker: realBroker,
		},
		mockBroker: mb,
		handlerMap: handlers,
	}, nil
}
// Example #2
// TestNewBrokerReturnsPartitionLeader verifies that newBroker queries the
// bootstrap brokers for metadata and connects to the reported partition
// leader (broker3), even when one bootstrap broker (broker1) is down.
func TestNewBrokerReturnsPartitionLeader(t *testing.T) {
	cp := newChainPartition(provisional.TestChainID, rawPartition)
	broker1 := sarama.NewMockBroker(t, 1)
	broker2 := sarama.NewMockBroker(t, 2)
	broker3 := sarama.NewMockBroker(t, 3)
	defer func() {
		broker2.Close()
		broker3.Close()
	}()

	// Use broker1 and broker2 as bootstrap brokers, but shutdown broker1 right away
	broker1.Close()

	// Add expectation that broker2 will return a metadata response
	// that identifies broker3 as the topic partition leader
	broker2.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker1.Addr(), broker1.BrokerID()).
			SetBroker(broker2.Addr(), broker2.BrokerID()).
			SetBroker(broker3.Addr(), broker3.BrokerID()).
			SetLeader(cp.Topic(), cp.Partition(), broker3.BrokerID()),
	})

	// Add expectation that broker3 responds to an offset request
	broker3.SetHandlerByMap(map[string]sarama.MockResponse{
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetOldest, testOldestOffset).
			SetOffset(cp.Topic(), cp.Partition(), sarama.OffsetNewest, testNewestOffset),
	})

	// Get leader for the test chain partition.
	// Fix: the error was previously discarded with `_`, which would have
	// caused a nil-pointer dereference on GetOffset below if newBroker failed.
	leaderBroker, err := newBroker([]string{broker1.Addr(), broker2.Addr()}, cp)
	if err != nil {
		t.Fatal("Expected newBroker to locate the partition leader:", err)
	}

	// Only broker3 will respond successfully to an offset request
	offsetRequest := new(sarama.OffsetRequest)
	offsetRequest.AddBlock(cp.Topic(), cp.Partition(), -1, 1)
	if _, err := leaderBroker.GetOffset(cp, offsetRequest); err != nil {
		t.Fatal("Expected leader broker to respond to request:", err)
	}
}
// Example #3
// TestWork verifies that work() returns an error when invoked with valid
// command-line arguments (client id, broker address, topic) but no input
// to process.
func TestWork(t *testing.T) {
	mb := sarama.NewMockBroker(t, 1)
	mb.Returns(new(sarama.MetadataResponse))

	// Simulate the CLI invocation: gomkafka <client_id> <broker> <topic>.
	os.Args = []string{"gomkafka", "client_id", mb.Addr(), "foo_topic"}

	err := work()
	// Test empty input case.
	if err == nil {
		// Fix: the old message formatted the (necessarily nil) error, which
		// produced a meaningless failure report; state the expectation instead.
		t.Error("expected work() to fail on empty input, got nil error")
	}
}
// Example #4
// TestProducerConfigMessageMaxBytes checks that the producer configuration
// produced by newBrokerConfig accepts a typically-sized message and rejects
// one exceeding the maximum message size with ErrMessageSizeTooLarge.
func TestProducerConfigMessageMaxBytes(t *testing.T) {
	cp := newChainPartition(provisional.TestChainID, rawPartition)

	broker := sarama.NewMockBroker(t, 1)
	defer func() {
		broker.Close()
	}()
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader(cp.Topic(), cp.Partition(), broker.BrokerID()),
		"ProduceRequest": sarama.NewMockProduceResponse(t),
	})

	config := newBrokerConfig(testConf.Kafka.Version, rawPartition)
	producer, err := sarama.NewSyncProducer([]string{broker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		producer.Close()
	}()

	testCases := []struct {
		name string
		size int
		err  error
	}{
		{"TypicalDeploy", 8 * 1024 * 1024, nil},
		{"TooBig", 100*1024*1024 + 1, sarama.ErrMessageSizeTooLarge},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Fix: use a subtest-local err instead of clobbering the outer
			// one, and report both expected and actual errors on mismatch.
			_, _, err := producer.SendMessage(&sarama.ProducerMessage{
				Topic: cp.Topic(),
				Value: sarama.ByteEncoder(make([]byte, tc.size)),
			})
			if err != tc.err {
				t.Fatalf("expected error %v, got %v", tc.err, err)
			}
		})
	}
}
// Example #5
// TestNewBrokerConfig checks that newBrokerConfig pins the producer to the
// requested (non-default) partition: every message sent must land on that
// partition even though the broker advertises three partitions.
func TestNewBrokerConfig(t *testing.T) {
	// Use a partition ID that is not the 'default' (rawPartition)
	var differentPartition int32 = 2
	// Fix: declare cp as a local variable instead of assigning to the
	// package-level `cp`, which mutated shared state across tests.
	cp := newChainPartition(provisional.TestChainID, differentPartition)

	// Setup a mock broker that reports that it has 3 partitions for the topic
	broker := sarama.NewMockBroker(t, 1)
	defer func() {
		broker.Close()
	}()
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader(cp.Topic(), 0, broker.BrokerID()).
			SetLeader(cp.Topic(), 1, broker.BrokerID()).
			SetLeader(cp.Topic(), 2, broker.BrokerID()),
		"ProduceRequest": sarama.NewMockProduceResponse(t),
	})

	config := newBrokerConfig(testConf.Kafka.Version, differentPartition)
	producer, err := sarama.NewSyncProducer([]string{broker.Addr()}, config)
	if err != nil {
		t.Fatal("Failed to create producer:", err)
	}
	defer func() {
		producer.Close()
	}()

	// Send several messages; all of them must be routed to the pinned partition.
	for i := 0; i < 10; i++ {
		assignedPartition, _, err := producer.SendMessage(&sarama.ProducerMessage{Topic: cp.Topic()})
		if err != nil {
			t.Fatal("Failed to send message:", err)
		}
		if assignedPartition != differentPartition {
			t.Fatalf("Message wasn't posted to the right partition - expected: %d, got %v", differentPartition, assignedPartition)
		}
	}
}
// Example #6
// TestGomkafka checks both directions: Gomkafka succeeds when pointed at a
// live mock broker, and fails when pointed at an address nothing listens on.
func TestGomkafka(t *testing.T) {
	mb := sarama.NewMockBroker(t, 1)
	mb.Returns(new(sarama.MetadataResponse))

	// Happy path: the mock broker is reachable.
	goodConfig := &KafkaConfig{
		"client_id",
		[]string{mb.Addr()},
		"foo_topic",
	}
	if _, _, err := Gomkafka(goodConfig); err != nil {
		t.Errorf("%+v", err)
	}

	// Failure path: no broker behind this address.
	badConfig := &KafkaConfig{
		"client_id",
		[]string{"localhost:8080"},
		"foo_topic",
	}
	if _, _, err := Gomkafka(badConfig); err == nil {
		t.Errorf("expected an error, got <nil>")
	}
}
// Example #7
// TestReceivePayloadMessage runs a KafkaInput against two mock brokers:
// b1 serves metadata naming b2 as the partition leader, and b2 serves an
// offset response followed by one two-byte ("AB") message. The test checks
// that the payload reaches the splitter, that the registered pack decorator
// sets the message type to "heka.kafka", and that the consumed offset is
// checkpointed to disk.
func TestReceivePayloadMessage(t *testing.T) {
	b1 := sarama.NewMockBroker(t, 1)
	b2 := sarama.NewMockBroker(t, 2)
	ctrl := gomock.NewController(t)
	tmpDir, tmpErr := ioutil.TempDir("", "kafkainput-tests")
	if tmpErr != nil {
		t.Errorf("Unable to create a temporary directory: %s", tmpErr)
	}

	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			t.Errorf("Cleanup failed: %s", err)
		}
		ctrl.Finish()
	}()

	topic := "test"
	// b1 (the bootstrap broker) reports b2 (broker ID 2) as the leader
	// for partition 0 of the topic.
	mdr := new(sarama.MetadataResponse)
	mdr.AddBroker(b2.Addr(), b2.BrokerID())
	mdr.AddTopicPartition(topic, 0, 2)
	b1.Returns(mdr)

	// b2 answers the consumer's initial offset request with offset 0 ...
	or := new(sarama.OffsetResponse)
	or.AddTopicPartition(topic, 0, 0)
	b2.Returns(or)

	// ... and then serves a single fetch response containing bytes "AB".
	fr := new(sarama.FetchResponse)
	fr.AddMessage(topic, 0, nil, sarama.ByteEncoder([]byte{0x41, 0x42}), 0)
	b2.Returns(fr)

	// Point the input's checkpoint storage at the temp dir.
	pConfig := NewPipelineConfig(nil)
	pConfig.Globals.BaseDir = tmpDir
	ki := new(KafkaInput)
	ki.SetName(topic)
	ki.SetPipelineConfig(pConfig)
	config := ki.ConfigStruct().(*KafkaInputConfig)
	config.Addrs = append(config.Addrs, b1.Addr())
	config.Topic = topic

	ith := new(plugins_ts.InputTestHelper)
	ith.Pack = NewPipelinePack(pConfig.InputRecycleChan())
	ith.MockHelper = pipelinemock.NewMockPluginHelper(ctrl)
	ith.MockInputRunner = pipelinemock.NewMockInputRunner(ctrl)

	ith.MockSplitterRunner = pipelinemock.NewMockSplitterRunner(ctrl)

	err := ki.Init(config)
	if err != nil {
		t.Fatalf("%s", err)
	}

	ith.MockInputRunner.EXPECT().NewSplitterRunner("").Return(ith.MockSplitterRunner)
	ith.MockSplitterRunner.EXPECT().UseMsgBytes().Return(false)

	// Capture the pack decorator the input registers so it can be applied
	// to a pack and inspected below.
	decChan := make(chan func(*PipelinePack), 1)
	decCall := ith.MockSplitterRunner.EXPECT().SetPackDecorator(gomock.Any())
	decCall.Do(func(dec func(pack *PipelinePack)) {
		decChan <- dec
	})

	// Capture the raw bytes the input hands to the splitter.
	bytesChan := make(chan []byte, 1)
	splitCall := ith.MockSplitterRunner.EXPECT().SplitBytes(gomock.Any(), nil)
	splitCall.Do(func(recd []byte, del Deliverer) {
		bytesChan <- recd
	})

	// Run the input in the background; its return value is checked after Stop().
	errChan := make(chan error)
	go func() {
		errChan <- ki.Run(ith.MockInputRunner, ith.MockHelper)
	}()

	recd := <-bytesChan
	if string(recd) != "AB" {
		t.Errorf("Invalid Payload Expected: AB received: %s", string(recd))
	}

	// Apply the captured decorator and verify it tags the message type.
	packDec := <-decChan
	packDec(ith.Pack)
	if ith.Pack.Message.GetType() != "heka.kafka" {
		t.Errorf("Invalid Type %s", ith.Pack.Message.GetType())
	}

	// There is a hang on the consumer close with the mock broker
	// closing the brokers before the consumer works around the issue
	// and is good enough for this test.
	b1.Close()
	b2.Close()

	ki.Stop()
	err = <-errChan
	if err != nil {
		t.Fatal(err)
	}

	// After consuming the message at offset 0, the checkpoint file should
	// record the next offset to read (1).
	filename := filepath.Join(tmpDir, "kafka", "test.test.0.offset.bin")
	if o, err := readCheckpoint(filename); err != nil {
		t.Errorf("Could not read the checkpoint file: %s", filename)
	} else {
		if o != 1 {
			t.Errorf("Incorrect offset Expected: 1 Received: %d", o)
		}
	}
}
// TestSendMessage drives a KafkaOutput against a single mock broker that
// accepts produce requests. It pushes one pack through the output's input
// channel and verifies via the atomic counters that exactly one message was
// processed and none failed.
func TestSendMessage(t *testing.T) {
	ctrl := gomock.NewController(t)
	broker := sarama.NewMockBroker(t, 2)

	defer func() {
		broker.Close()
		ctrl.Finish()
	}()

	topic := "test"
	globals := DefaultGlobals()
	pConfig := NewPipelineConfig(globals)

	// The mock broker is both the metadata source and the partition leader,
	// and acknowledges all produce requests.
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader(topic, 0, broker.BrokerID()),
		"ProduceRequest": sarama.NewMockProduceResponse(t),
	})

	ko := new(KafkaOutput)
	ko.SetPipelineConfig(pConfig)
	config := ko.ConfigStruct().(*KafkaOutputConfig)
	config.Addrs = append(config.Addrs, broker.Addr())
	config.Topic = topic
	err := ko.Init(config)
	if err != nil {
		t.Fatal(err)
	}
	oth := plugins_ts.NewOutputTestHelper(ctrl)
	encoder := new(plugins.PayloadEncoder)
	encoder.Init(encoder.ConfigStruct().(*plugins.PayloadEncoderConfig))

	// Buffered so the single pack can be queued before the output runs.
	inChan := make(chan *PipelinePack, 1)

	msg := pipeline_ts.GetTestMessage()
	pack := NewPipelinePack(pConfig.InputRecycleChan())
	pack.Message = msg

	inChanCall := oth.MockOutputRunner.EXPECT().InChan().AnyTimes()
	inChanCall.Return(inChan)

	errChan := make(chan error)
	startOutput := func() {
		go func() {
			err := ko.Run(oth.MockOutputRunner, oth.MockHelper)
			errChan <- err
		}()
	}

	oth.MockOutputRunner.EXPECT().Encoder().Return(encoder)
	oth.MockOutputRunner.EXPECT().Encode(pack).Return(encoder.Encode(pack))

	outStr := "Write me out to the network"
	pack.Message.SetPayload(outStr)
	startOutput()

	// Both counters must start at zero before any pack is delivered.
	msgcount := atomic.LoadInt64(&ko.processMessageCount)
	if msgcount != 0 {
		t.Errorf("Invalid starting processMessageCount %d", msgcount)
	}
	msgcount = atomic.LoadInt64(&ko.processMessageFailures)
	if msgcount != 0 {
		t.Errorf("Invalid starting processMessageFailures %d", msgcount)
	}

	// Deliver the pack; closing the channel lets Run drain and return.
	inChan <- pack
	close(inChan)
	err = <-errChan
	if err != nil {
		t.Errorf("Error running output %s", err)
	}

	// Exactly one message processed, no failures.
	msgcount = atomic.LoadInt64(&ko.processMessageCount)
	if msgcount != 1 {
		t.Errorf("Invalid ending processMessageCount %d", msgcount)
	}
	msgcount = atomic.LoadInt64(&ko.processMessageFailures)
	if msgcount != 0 {
		t.Errorf("Invalid ending processMessageFailures %d", msgcount)
	}
}
// TestReceiveProtobufMessage runs a KafkaInput against a single mock broker
// in msg-bytes mode (UseMsgBytes returns true): the raw bytes "AB" served by
// the mock fetch response must be handed unchanged to the splitter.
func TestReceiveProtobufMessage(t *testing.T) {
	broker := sarama.NewMockBroker(t, 2)
	ctrl := gomock.NewController(t)
	tmpDir, tmpErr := ioutil.TempDir("", "kafkainput-tests")
	if tmpErr != nil {
		t.Errorf("Unable to create a temporary directory: %s", tmpErr)
	}

	defer func() {
		if err := os.RemoveAll(tmpDir); err != nil {
			t.Errorf("Cleanup failed: %s", err)
		}
		ctrl.Finish()
	}()

	topic := "test"
	// Fetch responses carry one message (bytes "AB") at offset 0.
	mockFetchResponse := sarama.NewMockFetchResponse(t, 1)
	mockFetchResponse.SetMessage(topic, 0, 0, sarama.ByteEncoder([]byte{0x41, 0x42}))

	// The mock broker is its own leader and serves metadata, offsets
	// (oldest=0, newest=2) and the fetch response above.
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader(topic, 0, broker.BrokerID()),
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset(topic, 0, sarama.OffsetOldest, 0).
			SetOffset(topic, 0, sarama.OffsetNewest, 2),
		"FetchRequest": mockFetchResponse,
	})

	// Point the input's checkpoint storage at the temp dir.
	pConfig := NewPipelineConfig(nil)
	pConfig.Globals.BaseDir = tmpDir
	ki := new(KafkaInput)
	ki.SetName(topic)
	ki.SetPipelineConfig(pConfig)
	config := ki.ConfigStruct().(*KafkaInputConfig)
	config.Addrs = append(config.Addrs, broker.Addr())
	config.Topic = topic

	ith := new(plugins_ts.InputTestHelper)
	ith.Pack = NewPipelinePack(pConfig.InputRecycleChan())
	ith.MockHelper = pipelinemock.NewMockPluginHelper(ctrl)
	ith.MockInputRunner = pipelinemock.NewMockInputRunner(ctrl)

	ith.MockSplitterRunner = pipelinemock.NewMockSplitterRunner(ctrl)

	err := ki.Init(config)
	if err != nil {
		t.Fatalf("%s", err)
	}

	// Msg-bytes mode: no pack decorator is registered, unlike the payload test.
	ith.MockInputRunner.EXPECT().NewSplitterRunner("").Return(ith.MockSplitterRunner)
	ith.MockSplitterRunner.EXPECT().UseMsgBytes().Return(true)
	ith.MockSplitterRunner.EXPECT().Done()

	// Capture the raw bytes the input hands to the splitter.
	bytesChan := make(chan []byte, 1)
	splitCall := ith.MockSplitterRunner.EXPECT().SplitBytes(gomock.Any(), nil)
	splitCall.Do(func(recd []byte, del Deliverer) {
		bytesChan <- recd
	})

	// Run the input in the background; its return value is checked after Stop().
	errChan := make(chan error)
	go func() {
		errChan <- ki.Run(ith.MockInputRunner, ith.MockHelper)
	}()

	recd := <-bytesChan
	if string(recd) != "AB" {
		t.Errorf("Invalid MsgBytes Expected: AB received: %s", string(recd))
	}

	// There is a hang on the consumer close with the mock broker
	// closing the brokers before the consumer works around the issue
	// and is good enough for this test.
	broker.Close()

	ki.Stop()
	err = <-errChan
	if err != nil {
		t.Fatal(err)
	}
}