Example #1
// GenMessages concurrently produces `count` messages for every key in `keys`
// to `topic`. Each message body is `<prefix>:<key>:<seq>`. The produced
// messages are returned grouped by key and sorted by offset.
func GenMessages(c *C, prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	config := NewConfig()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	producer, err := SpawnGracefulProducer(config)
	c.Assert(err, IsNil)

	messages := make(map[string][]*sarama.ProducerMessage)
	var wg sync.WaitGroup
	var lock sync.Mutex
	for key, count := range keys {
		for i := 0; i < count; i++ {
			key := key
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
			spawn(&wg, func() {
				keyEncoder := sarama.StringEncoder(key)
				msgEncoder := sarama.StringEncoder(message)
				prodMsg, err := producer.Produce(topic, keyEncoder, msgEncoder)
				c.Assert(err, IsNil)
				log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
					topic, prodMsg.Partition, prodMsg.Offset, message)
				lock.Lock()
				messages[key] = append(messages[key], prodMsg)
				lock.Unlock()
			})
		}
	}
	wg.Wait()
	// Sort the produced messages in ascending order of their offsets.
	for _, keyMessages := range messages {
		sort.Sort(MessageSlice(keyMessages))
	}
	return messages
}
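A hedged usage sketch: how a test might call GenMessages to seed a topic before consuming. The suite type, topic name, and key counts below are illustrative assumptions, not taken from the example above, and the sketch assumes the same test package and imports.

// Sketch only: assumes a gocheck suite in the same test package as GenMessages.
func (s *ExampleConsumerSuite) seedTopic(c *C) {
	// Produce 3 messages keyed "A" and 2 keyed "B" to the illustrative topic "test.4".
	produced := GenMessages(c, "seed", "test.4", map[string]int{"A": 3, "B": 2})
	// Messages come back grouped by key and sorted by offset.
	c.Assert(len(produced["A"]), Equals, 3)
	c.Assert(len(produced["B"]), Equals, 2)
}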
Example #2
// handleProduce is an HTTP request handler for `POST /topic/{topic}/messages`
func (as *T) handleProduce(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	topic := mux.Vars(r)[paramTopic]
	key := getParamBytes(r, paramKey)
	_, isSync := r.Form[paramSync]

	// Get the message body from the HTTP request.
	if _, ok := r.Header[headerContentLength]; !ok {
		errorText := fmt.Sprintf("Missing %s header", headerContentLength)
		respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{errorText})
		return
	}
	messageSizeStr := r.Header.Get(headerContentLength)
	messageSize, err := strconv.Atoi(messageSizeStr)
	if err != nil {
		errorText := fmt.Sprintf("Invalid %s header: %s", headerContentLength, messageSizeStr)
		respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{errorText})
		return
	}
	message, err := ioutil.ReadAll(r.Body)
	if err != nil {
		errorText := fmt.Sprintf("Failed to read a message: err=(%s)", err)
		respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{errorText})
		return
	}
	if len(message) != messageSize {
		errorText := fmt.Sprintf("Message size does not match %s: expected=%v, actual=%v",
			headerContentLength, messageSize, len(message))
		respondWithJSON(w, http.StatusBadRequest, errorHTTPResponse{errorText})
		return
	}

	// Asynchronously submit the message to the Kafka cluster.
	if !isSync {
		as.producer.AsyncProduce(topic, toEncoderPreservingNil(key), sarama.StringEncoder(message))
		respondWithJSON(w, http.StatusOK, EmptyResponse)
		return
	}

	prodMsg, err := as.producer.Produce(topic, toEncoderPreservingNil(key), sarama.StringEncoder(message))
	if err != nil {
		var status int
		switch err {
		case sarama.ErrUnknownTopicOrPartition:
			status = http.StatusNotFound
		default:
			status = http.StatusInternalServerError
		}
		respondWithJSON(w, status, errorHTTPResponse{err.Error()})
		return
	}

	respondWithJSON(w, http.StatusOK, produceHTTPResponse{
		Partition: prodMsg.Partition,
		Offset:    prodMsg.Offset,
	})
}
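A minimal sketch of how such a handler might be registered with gorilla/mux, which the handler already relies on via mux.Vars. The method name, the route path, and the listen address are assumptions made for illustration; imports are assumed to match the example above.

// Sketch only: assumes `T` is the API server type from the example above and
// that paramTopic is the route variable name read by mux.Vars in the handler.
func (as *T) startHTTPServer() error {
	router := mux.NewRouter()
	// POST /topic/{topic}/messages is routed to handleProduce; the {topic}
	// path variable is what mux.Vars(r)[paramTopic] resolves in the handler.
	router.HandleFunc("/topic/{topic}/messages", as.handleProduce).Methods("POST")
	// The listen address is illustrative; a real service would take it from its config.
	return http.ListenAndServe(":19092", router)
}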
Example #3
func (s *ProducerSuite) TestProduceInvalidTopic(c *C) {
	// Given
	p, _ := Spawn(s.cfg)
	// When
	_, err := p.Produce("no-such-topic", sarama.StringEncoder("1"), sarama.StringEncoder("Foo"))
	// Then
	c.Assert(err, Equals, sarama.ErrUnknownTopicOrPartition)
	// Cleanup
	p.Stop()
}
func (s *GracefulProducerSuite) TestProduceInvalidTopic(c *C) {
	// Given
	kci, _ := SpawnGracefulProducer(s.config)
	// When
	_, err := kci.Produce("no-such-topic", sarama.StringEncoder("1"), sarama.StringEncoder("Foo"))
	// Then
	c.Assert(err, Equals, sarama.ErrUnknownTopicOrPartition)
	// Cleanup
	kci.Stop()
}
Example #5
func (s *ProducerSuite) TestProduce(c *C) {
	// Given
	p, _ := Spawn(s.cfg)
	offsetsBefore := s.kh.GetOffsets("test.4")
	// When
	_, err := p.Produce("test.4", sarama.StringEncoder("1"), sarama.StringEncoder("Foo"))
	// Then
	c.Assert(err, IsNil)
	offsetsAfter := s.kh.GetOffsets("test.4")
	c.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+1)
	// Cleanup
	p.Stop()
}
func (s *GracefulProducerSuite) TestProduce(c *C) {
	// Given
	kci, _ := SpawnGracefulProducer(s.config)
	offsetsBefore := s.tkc.getOffsets("test.4")
	// When
	_, err := kci.Produce("test.4", sarama.StringEncoder("1"), sarama.StringEncoder("Foo"))
	// Then
	c.Assert(err, IsNil)
	offsetsAfter := s.tkc.getOffsets("test.4")
	c.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+1)
	// Cleanup
	kci.Stop()
}
Example #7
// If the `key` of a produced message is empty then it is deterministically
// submitted to the particular partition determined by the hash of the empty key.
func (s *ProducerSuite) TestAsyncProduceEmptyKey(c *C) {
	// Given
	p, _ := Spawn(s.cfg)
	offsetsBefore := s.kh.GetOffsets("test.4")
	// When
	for i := 0; i < 10; i++ {
		p.AsyncProduce("test.4", sarama.StringEncoder(""), sarama.StringEncoder(strconv.Itoa(i)))
	}
	p.Stop()
	offsetsAfter := s.kh.GetOffsets("test.4")
	// Then
	c.Assert(s.failedMessages(), DeepEquals, []string{})
	c.Assert(offsetsAfter[0], Equals, offsetsBefore[0])
	c.Assert(offsetsAfter[1], Equals, offsetsBefore[1])
	c.Assert(offsetsAfter[2], Equals, offsetsBefore[2])
	c.Assert(offsetsAfter[3], Equals, offsetsBefore[3]+10)
}
Example #8
// PutMessages concurrently produces `count` messages for every key in `keys`
// to `topic`, using message bodies of the form `<prefix>:<key>:<seq>`, and
// returns the successfully produced messages grouped by key and sorted by offset.
func (kh *KafkaHelper) PutMessages(prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	messages := make(map[string][]*sarama.ProducerMessage)
	var wg sync.WaitGroup
	total := 0
	for key, count := range keys {
		total += count
		for i := 0; i < count; i++ {
			key := key
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
			wg.Add(1)
			go func() {
				defer wg.Done()
				keyEncoder := sarama.StringEncoder(key)
				msgEncoder := sarama.StringEncoder(message)
				prodMsg := &sarama.ProducerMessage{
					Topic: topic,
					Key:   keyEncoder,
					Value: msgEncoder,
				}
				kh.producer.Input() <- prodMsg
			}()
		}
	}
	for i := 0; i < total; i++ {
		select {
		case prodMsg := <-kh.producer.Successes():
			key := string(prodMsg.Key.(sarama.StringEncoder))
			messages[key] = append(messages[key], prodMsg)
			log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
				topic, prodMsg.Partition, prodMsg.Offset, prodMsg.Value)
		case prodErr := <-kh.producer.Errors():
			kh.c.Error(prodErr)
		}
	}
	// Sort the produced messages in ascending order of their offsets.
	for _, keyMessages := range messages {
		sort.Sort(messageSlice(keyMessages))
	}
	wg.Wait()
	return messages
}
// If `key` is not `nil` then produced messages are deterministically
// distributed among partitions based on the hash of `key`.
func (s *GracefulProducerSuite) TestAsyncProduce(c *C) {
	// Given
	kci, _ := SpawnGracefulProducer(s.config)
	offsetsBefore := s.tkc.getOffsets("test.4")
	// When
	for i := 0; i < 10; i++ {
		kci.AsyncProduce("test.4", sarama.StringEncoder("1"), sarama.StringEncoder(strconv.Itoa(i)))
		kci.AsyncProduce("test.4", sarama.StringEncoder("2"), sarama.StringEncoder(strconv.Itoa(i)))
		kci.AsyncProduce("test.4", sarama.StringEncoder("3"), sarama.StringEncoder(strconv.Itoa(i)))
		kci.AsyncProduce("test.4", sarama.StringEncoder("4"), sarama.StringEncoder(strconv.Itoa(i)))
		kci.AsyncProduce("test.4", sarama.StringEncoder("5"), sarama.StringEncoder(strconv.Itoa(i)))
	}
	kci.Stop()
	offsetsAfter := s.tkc.getOffsets("test.4")
	// Then
	c.Assert(s.failedMessages(), DeepEquals, []string{})
	c.Assert(offsetsAfter[0], Equals, offsetsBefore[0]+20)
	c.Assert(offsetsAfter[1], Equals, offsetsBefore[1]+10)
	c.Assert(offsetsAfter[2], Equals, offsetsBefore[2]+10)
	c.Assert(offsetsAfter[3], Equals, offsetsBefore[3]+10)
}
Example #10
// If the `key` of a produced message is `nil` then it is submitted to a random
// partition. Therefore a batch of such messages is spread evenly among all
// available partitions.
func (s *ProducerSuite) TestAsyncProduceNilKey(c *C) {
	// Given
	p, _ := Spawn(s.cfg)
	offsetsBefore := s.kh.GetOffsets("test.4")
	// When
	for i := 0; i < 100; i++ {
		p.AsyncProduce("test.4", nil, sarama.StringEncoder(strconv.Itoa(i)))
	}
	p.Stop()
	offsetsAfter := s.kh.GetOffsets("test.4")
	// Then
	c.Assert(s.failedMessages(), DeepEquals, []string{})
	delta0 := offsetsAfter[0] - offsetsBefore[0]
	delta1 := offsetsAfter[1] - offsetsBefore[1]
	if delta0 == 0 || delta1 == 0 {
		panic(fmt.Errorf("Too high imbalance: %v != %v", delta0, delta1))
	}
}
Example #11
// Even though the wrapped `sarama.Producer` is instructed to stop immediately
// on client stop due to `ShutdownTimeout == 0`, none of the messages are lost,
// because none of them are retries. This test exists mostly to increase coverage.
func (s *ProducerSuite) TestTooSmallShutdownTimeout(c *C) {
	// Given
	s.cfg.Producer.ShutdownTimeout = 0
	p, _ := Spawn(s.cfg)
	offsetsBefore := s.kh.GetOffsets("test.4")
	// When
	for i := 0; i < 100; i++ {
		v := sarama.StringEncoder(strconv.Itoa(i))
		p.AsyncProduce("test.4", v, v)
	}
	p.Stop()
	offsetsAfter := s.kh.GetOffsets("test.4")
	// Then
	c.Assert(s.failedMessages(), DeepEquals, []string{})
	delta := int64(0)
	for i := 0; i < 4; i++ {
		delta += offsetsAfter[i] - offsetsBefore[i]
	}
	c.Assert(delta, Equals, int64(100))
}
Example #12
// compareMsg reports whether a consumed message matches the produced message
// it originated from, comparing both the value and the offset.
func (s *SmartConsumerSuite) compareMsg(consMsg *sarama.ConsumerMessage, prodMsg *sarama.ProducerMessage) bool {
	return sarama.StringEncoder(consMsg.Value) == prodMsg.Value.(sarama.Encoder) && consMsg.Offset == prodMsg.Offset
}
Example #13
func assertMsg(c *C, consMsg *sarama.ConsumerMessage, prodMsg *sarama.ProducerMessage) {
	c.Assert(sarama.StringEncoder(consMsg.Value), Equals, prodMsg.Value)
	c.Assert(consMsg.Offset, Equals, prodMsg.Offset)
}
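A hedged sketch of what these helpers check, with both messages built by hand. In a real test consMsg would come from a consumer; the function name and the literal values here are illustrative assumptions.

// Sketch only: constructs a matching produced/consumed pair to show what
// assertMsg compares (the value and the offset).
func assertMsgSketch(c *C) {
	prodMsg := &sarama.ProducerMessage{
		Topic:  "test.4",
		Value:  sarama.StringEncoder("Foo"),
		Offset: 42,
	}
	consMsg := &sarama.ConsumerMessage{
		Topic:  "test.4",
		Value:  []byte("Foo"),
		Offset: 42,
	}
	assertMsg(c, consMsg, prodMsg) // passes: same value and same offset
}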
Example #14
// toEncoderPreservingNil converts a slice of bytes to `sarama.Encoder` but
// returns `nil` if the passed slice is `nil`.
func toEncoderPreservingNil(b []byte) sarama.Encoder {
	if b != nil {
		return sarama.StringEncoder(b)
	}
	return nil
}
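A short sketch of the distinction this helper preserves, mirroring the nil-key and empty-key tests above: a nil key stays nil so the partitioner picks a random partition, while an empty but non-nil key is hashed to one fixed partition. The function and variable names below are illustrative.

// Sketch only: shows the nil vs. empty-key behavior that toEncoderPreservingNil keeps intact.
func keyHandlingSketch() {
	var nilKey []byte           // nil: no key supplied by the caller
	emptyKey := make([]byte, 0) // non-nil, zero-length key

	// Stays nil, so the producer assigns a random partition
	// (compare TestAsyncProduceNilKey above).
	_ = toEncoderPreservingNil(nilKey)

	// Becomes sarama.StringEncoder(""), which hashes to a single fixed
	// partition (compare TestAsyncProduceEmptyKey above).
	_ = toEncoderPreservingNil(emptyKey)
}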