func queue(topic string, p Payload) {
    for _, record := range p.Records {
        msg := &sarama.ProducerMessage{
            Topic:     topic,
            Value:     sarama.StringEncoder(record.Value),
            Key:       sarama.StringEncoder(record.Key),
            Partition: record.Partition,
        }
        producer.Input() <- msg
    }
}
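// The package-level `producer` and the Payload type are not shown above. A
// minimal sketch of how that async producer might be set up; the function
// name, broker list parameter, and error-draining goroutine are assumptions,
// not part of the original snippet. Because queue sets Partition explicitly,
// the config selects the manual partitioner so that field is honored.
func newQueueProducer(brokers []string) (sarama.AsyncProducer, error) {
    config := sarama.NewConfig()
    config.Producer.Partitioner = sarama.NewManualPartitioner
    producer, err := sarama.NewAsyncProducer(brokers, config)
    if err != nil {
        return nil, err
    }
    // Drain the error channel so failed deliveries cannot block the producer.
    go func() {
        for err := range producer.Errors() {
            log.Println("Failed to produce message:", err)
        }
    }()
    return producer, nil
}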
func (s *Server) withAccessLog(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        started := time.Now()

        next.ServeHTTP(w, r)

        entry := &accessLogEntry{
            Method:       r.Method,
            Host:         r.Host,
            Path:         r.RequestURI,
            IP:           r.RemoteAddr,
            ResponseTime: float64(time.Since(started)) / float64(time.Second),
        }

        // We will use the client's IP address as key. This will cause
        // all the access log entries of the same IP address to end up
        // on the same partition.
        s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
            Topic: "access_log",
            Key:   sarama.StringEncoder(r.RemoteAddr),
            Value: entry,
        }
    })
}
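// For entry to be usable as a ProducerMessage Value, accessLogEntry must
// implement the sarama.Encoder interface (Encode and Length). A sketch using
// lazy JSON encoding; the field set mirrors the struct literal above, but
// the caching details are an assumption.
type accessLogEntry struct {
    Method       string  `json:"method"`
    Host         string  `json:"host"`
    Path         string  `json:"path"`
    IP           string  `json:"ip"`
    ResponseTime float64 `json:"response_time"`

    encoded []byte
    err     error
}

// ensureEncoded marshals the entry to JSON once and caches the result, so
// Length and Encode agree on the same bytes.
func (ale *accessLogEntry) ensureEncoded() {
    if ale.encoded == nil && ale.err == nil {
        ale.encoded, ale.err = json.Marshal(ale)
    }
}

func (ale *accessLogEntry) Length() int {
    ale.ensureEncoded()
    return len(ale.encoded)
}

func (ale *accessLogEntry) Encode() ([]byte, error) {
    ale.ensureEncoded()
    return ale.encoded, ale.err
}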
func TestSyncProducerWithTooManyExpectations(t *testing.T) {
    trm := newTestReporterMock()

    sp := NewSyncProducer(trm, nil)
    sp.ExpectSendMessageAndSucceed()
    sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

    msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
    if _, _, err := sp.SendMessage(msg); err != nil {
        t.Error("No error expected on first SendMessage call", err)
    }

    if err := sp.Close(); err != nil {
        t.Error(err)
    }

    if len(trm.errors) != 1 {
        t.Error("Expected to report an error")
    }
}
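// newTestReporterMock is not shown above. A minimal sketch of a reporter that
// satisfies the mocks package's ErrorReporter interface by collecting error
// messages, so the test can assert on how many were reported; the exact
// original implementation may differ.
type testReporterMock struct {
    errors []string
}

func newTestReporterMock() *testReporterMock {
    return &testReporterMock{errors: make([]string, 0)}
}

func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
    trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
}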
func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
    sp := NewSyncProducer(t, nil)
    defer func() {
        if err := sp.Close(); err != nil {
            t.Error(err)
        }
    }()

    sp.ExpectSendMessageAndSucceed()
    sp.ExpectSendMessageAndSucceed()
    sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)

    msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}

    _, offset, err := sp.SendMessage(msg)
    if err != nil {
        t.Errorf("The first message should have been produced successfully, but got %s", err)
    }
    if offset != 1 || offset != msg.Offset {
        t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
    }

    _, offset, err = sp.SendMessage(msg)
    if err != nil {
        t.Errorf("The second message should have been produced successfully, but got %s", err)
    }
    if offset != 2 || offset != msg.Offset {
        t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
    }

    _, _, err = sp.SendMessage(msg)
    if err != sarama.ErrOutOfBrokers {
        t.Errorf("The third message should not have been produced successfully")
    }
}
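// Beyond plain succeed/fail expectations, the mocks package can also validate
// the produced payload via ExpectSendMessageWithCheckerFunctionAndSucceed,
// which takes a func([]byte) error. A brief sketch; the topic and payload
// here are illustrative only.
func TestSyncProducerChecksMessageValue(t *testing.T) {
    sp := NewSyncProducer(t, nil)
    defer func() {
        if err := sp.Close(); err != nil {
            t.Error(err)
        }
    }()

    // The checker runs against the encoded message value when SendMessage is
    // called; returning a non-nil error fails the expectation.
    sp.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
        if !bytes.Equal(val, []byte("test")) {
            return fmt.Errorf("unexpected value: %q", val)
        }
        return nil
    })

    msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
    if _, _, err := sp.SendMessage(msg); err != nil {
        t.Error(err)
    }
}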
func (s *Server) collectQueryStringData() http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path != "/" {
            http.NotFound(w, r)
            return
        }

        // We are not setting a message key, which means that all messages will
        // be distributed randomly over the different partitions.
        partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
            Topic: "important",
            Value: sarama.StringEncoder(r.URL.RawQuery),
        })

        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Fprintf(w, "Failed to store your data: %s", err)
        } else {
            // The tuple (topic, partition, offset) can be used as a unique identifier
            // for a message in a Kafka cluster.
            fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
        }
    })
}
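// A sketch of how the s.DataCollector sync producer might be constructed; the
// function name, broker list parameter, and retry count are assumptions. Note
// that sarama's SyncProducer requires Producer.Return.Successes to be true.
func newDataCollector(brokerList []string) (sarama.SyncProducer, error) {
    config := sarama.NewConfig()
    config.Producer.RequiredAcks = sarama.WaitForAll // wait for all in-sync replicas to ack
    config.Producer.Retry.Max = 10                   // retry up to 10 times on transient errors
    config.Producer.Return.Successes = true          // required by the SyncProducer
    return sarama.NewSyncProducer(brokerList, config)
}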
func main() {
    flag.Parse()

    if *brokerList == "" {
        printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
    }

    if *topic == "" {
        printUsageErrorAndExit("no -topic specified")
    }

    if *verbose {
        sarama.Logger = logger
    }

    config := sarama.NewConfig()
    config.Producer.RequiredAcks = sarama.WaitForAll
    config.Producer.Return.Successes = true // required for the SyncProducer used below

    switch *partitioner {
    case "":
        if *partition >= 0 {
            config.Producer.Partitioner = sarama.NewManualPartitioner
        } else {
            config.Producer.Partitioner = sarama.NewHashPartitioner
        }
    case "hash":
        config.Producer.Partitioner = sarama.NewHashPartitioner
    case "random":
        config.Producer.Partitioner = sarama.NewRandomPartitioner
    case "manual":
        config.Producer.Partitioner = sarama.NewManualPartitioner
        if *partition == -1 {
            printUsageErrorAndExit("-partition is required when partitioning manually")
        }
    default:
        printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
    }

    message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}

    if *key != "" {
        message.Key = sarama.StringEncoder(*key)
    }

    if *value != "" {
        message.Value = sarama.StringEncoder(*value)
    } else if stdinAvailable() {
        bytes, err := ioutil.ReadAll(os.Stdin)
        if err != nil {
            printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
        }
        message.Value = sarama.ByteEncoder(bytes)
    } else {
        printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
    }

    producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
    if err != nil {
        printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
    }
    defer func() {
        if err := producer.Close(); err != nil {
            logger.Println("Failed to close Kafka producer cleanly:", err)
        }
    }()

    partition, offset, err := producer.SendMessage(message)
    if err != nil {
        printErrorAndExit(69, "Failed to produce message: %s", err)
    } else if !*silent {
        fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
    }
}
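// The helpers used by main are not shown. A minimal sketch of plausible
// implementations; the exit codes mirror the calls above, but the bodies are
// assumptions.
func printErrorAndExit(code int, format string, values ...interface{}) {
    fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
    os.Exit(code)
}

func printUsageErrorAndExit(message string) {
    fmt.Fprintln(os.Stderr, "ERROR:", message)
    fmt.Fprintln(os.Stderr, "Available command line options:")
    flag.PrintDefaults()
    os.Exit(64)
}

// stdinAvailable reports whether stdin is a pipe or redirect rather than an
// interactive terminal, so the tool only blocks reading stdin when data was
// actually provided.
func stdinAvailable() bool {
    stat, _ := os.Stdin.Stat()
    return (stat.Mode() & os.ModeCharDevice) == 0
}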