func (th *TransmitHandler) run() {
	defer th.dead.Kill()
	defer th.producer.Close()
	defer close(th.bus)

	transChan := make(chan transmitRequest)
	trans := Transmit{ch: transChan}

	for {
		select {
		case <-th.kill.Chan():
			return
		case th.bus <- trans:
			req := <-transChan

			var key sarama.ByteEncoder
			if req.key != nil {
				key = sarama.ByteEncoder(req.key)
			}

			message := &sarama.ProducerMessage{
				Topic: req.topic,
				Key:   key,
				Value: sarama.ByteEncoder(req.val),
			}

			partition, offset, err := th.producer.SendMessage(message)
			req.response <- transmitResponse{
				partition: partition,
				offset:    offset,
				err:       err,
			}
		}
	}
}
// FIXME not fully fault tolerant like SyncPub.
func (this *pubStore) AsyncPub(cluster string, topic string, key []byte,
	msg []byte) (partition int32, offset int64, err error) {
	this.pubPoolsLock.RLock()
	pool, present := this.pubPools[cluster]
	this.pubPoolsLock.RUnlock()
	if !present {
		err = store.ErrInvalidCluster
		return
	}

	producer, e := pool.GetAsyncProducer()
	if e != nil {
		if producer != nil {
			producer.Recycle()
		}
		err = e
		return
	}

	var keyEncoder sarama.Encoder = nil // will use random partitioner
	if len(key) > 0 {
		keyEncoder = sarama.ByteEncoder(key) // will use hash partitioner
	}

	// TODO can be pooled
	producer.Input() <- &sarama.ProducerMessage{
		Topic: topic,
		Key:   keyEncoder,
		Value: sarama.ByteEncoder(msg),
	}
	producer.Recycle()
	return
}
func runProduce(cmd *Command, args []string) {
	brokers := brokers()
	config := sarama.NewConfig()
	config.ClientID = "k produce"
	config.Producer.Return.Successes = true
	client, err := sarama.NewClient(brokers, config)
	must(err)
	defer client.Close()

	producer, err := sarama.NewAsyncProducerFromClient(client)
	must(err)

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	defer close(signals)

	var wg sync.WaitGroup
	var enqueued, successes, errors int

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			fmt.Fprintf(os.Stderr, "Failed to produce message: %s\n", err)
			errors++
		}
	}()

	scanner := bufio.NewScanner(os.Stdin)
producerLoop:
	for scanner.Scan() {
		line := scanner.Text()
		idx := strings.Index(line, "\t")
		var msg *sarama.ProducerMessage
		if idx > 0 {
			msg = &sarama.ProducerMessage{
				Topic: topic,
				Key:   sarama.ByteEncoder(line[0:idx]),
				Value: sarama.ByteEncoder(line[idx+1:]),
			}
		} else {
			msg = &sarama.ProducerMessage{
				Topic: topic,
				Key:   nil,
				Value: sarama.ByteEncoder(line),
			}
		}

		select {
		case producer.Input() <- msg:
			enqueued++
		case <-signals:
			break producerLoop
		}
	}

	producer.AsyncClose()
	wg.Wait()
	fmt.Fprintf(os.Stderr, "messages produced: %d, errors: %d\n", successes, errors)
}
func (this *Mirror) pump(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer, stop chan struct{}) {
	defer func() {
		log.Println("pump cleanup...")
		sub.Close()
		log.Println("pump cleanup ok")

		stop <- struct{}{} // notify others I'm done
	}()

	log.Printf("start pumping")
	active := false
	for {
		select {
		case <-this.quit:
			return

		case <-stop:
			// yes sir!
			return

		case <-time.After(time.Second * 10):
			active = false
			log.Println("idle 10s waiting for new msg")

		case msg := <-sub.Messages():
			if !active || this.debug {
				log.Printf("<-[%d] T:%s M:%s", this.transferN, msg.Topic, string(msg.Value))
			}
			active = true

			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Key:   sarama.ByteEncoder(msg.Key),
				Value: sarama.ByteEncoder(msg.Value),
			}
			if this.autoCommit {
				sub.CommitUpto(msg)
			}

			// rate limit, never overflood the limited bandwidth between IDCs
			// FIXME when compressed, the bandwidth calculation is wrong
			bytesN := len(msg.Topic) + len(msg.Key) + len(msg.Value) + 20 // payload overhead
			if !this.bandwidthRateLimiter.Pour(bytesN) {
				time.Sleep(time.Second)
				this.Ui.Warn(fmt.Sprintf("%d -> bandwidth reached, backoff 1s", bytesN))
			}

			this.transferBytes += int64(bytesN)
			this.transferN++
			if this.transferN%this.progressStep == 0 {
				log.Println(gofmt.Comma(this.transferN))
			}

		case err := <-sub.Errors():
			this.Ui.Error(err.Error()) // TODO
		}
	}
}
func (m *message) initProducerMessage() {
	m.msg = sarama.ProducerMessage{
		Metadata:  m,
		Topic:     m.topic,
		Key:       sarama.ByteEncoder(m.key),
		Value:     sarama.ByteEncoder(m.value),
		Timestamp: m.ts,
	}
}
func (k *Producer) Send(topic string, key, data []byte) (int32, int64, error) {
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Key:   sarama.ByteEncoder(key),
		Value: sarama.ByteEncoder(data),
	}

	partition, offset, err := k.SyncProducer.SendMessage(msg)
	if err != nil {
		return 0, 0, errors.Trace(err)
	}
	return partition, offset, nil
}
func (tp *TypedProducer) produce(cmData *CmData) {
	// logger.Debug("produce requiredAcks=%d", int(tp.requiredAcks))

	// fetch and fill
	pmpe := tp.pmp.fetch()
	pmpe.privData = cmData
	pmsg := pmpe.pmsg
	pmsg.Topic = cmData.topic
	if len(cmData.key) == 0 {
		// if key is empty, use sarama.RandomPartitioner
		pmsg.Key = nil
	} else {
		pmsg.Key = sarama.StringEncoder(cmData.key)
	}
	pmsg.Value = sarama.ByteEncoder(cmData.data)
	pmsg.Metadata = pmpe

	// do produce
	for {
		select {
		case tp.ap.Input() <- pmsg:
			return
		case perr := <-tp.ap.Errors():
			tp.processProduceErrors(perr)
		}
	}
}
func BuyHandler(w http.ResponseWriter, r *http.Request) {
	userID := getUserID(r)
	order := &Order{
		UserID:    userID,
		OrderID:   uuid.NewV1().String(),
		CreatedAt: time.Now().UTC(),
	}
	orderJson, _ := json.Marshal(order)

	pmsg := &sarama.ProducerMessage{
		Partition: 0,
		Topic:     "buy",
		Value:     sarama.ByteEncoder(orderJson),
	}
	if _, _, err := SyncProducer.SendMessage(pmsg); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte(err.Error()))
		return
	}

	// set the Content-Type header before writing the status line,
	// otherwise the header is silently dropped
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(orderJson)
}
func (w kafkaWriter) send() error {
	for {
		ln, err := w.buffer.ReadBytes('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			// TODO: handle these errors?
			break
		}

		message := &sarama.ProducerMessage{
			Topic: w.topic,
			Value: sarama.ByteEncoder(ln),
		}

		go func(m *sarama.ProducerMessage) {
			if _, _, err := w.producer.SendMessage(m); err != nil {
				// TODO: handle errors, buffer, etc
				if cerr := w.writer.(io.Closer).Close(); cerr != nil {
					// TODO: handle errors, buffer, etc
				}
			}
		}(message)

		w.writer.Write(ln)
	}
	return nil
}
func Produce(Quit chan bool, Host []string, Topic string, Data chan []byte) {
	client, err := sarama.NewClient("crontab_client", Host, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	log.Println("kafka producer connected")
	defer client.Close()

	cfg := sarama.NewProducerConfig()
	cfg.Partitioner = sarama.NewRoundRobinPartitioner
	producer, err := sarama.NewProducer(client, cfg)
	if err != nil {
		panic(err)
	}
	defer producer.Close()
	log.Println("kafka producer ready")

	for {
		select {
		case pack := <-Data:
			producer.Input() <- &sarama.MessageToSend{
				Topic: Topic,
				Key:   nil,
				Value: sarama.ByteEncoder(pack),
			}
		case err := <-producer.Errors():
			log.Println(err)
		case <-Quit:
			// return here: a bare break would only exit the select, not the loop
			return
		}
	}
}
// Setup prepares the Requester for benchmarking.
func (k *kafkaRequester) Setup() error {
	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(k.urls, config)
	if err != nil {
		return err
	}

	consumer, err := sarama.NewConsumer(k.urls, nil)
	if err != nil {
		producer.Close()
		return err
	}
	partitionConsumer, err := consumer.ConsumePartition(k.topic, 0, sarama.OffsetNewest)
	if err != nil {
		producer.Close()
		consumer.Close()
		return err
	}

	k.producer = producer
	k.consumer = consumer
	k.partitionConsumer = partitionConsumer
	k.msg = &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.ByteEncoder(make([]byte, k.payloadSize)),
	}

	return nil
}
func Serve(producer sarama.SyncProducer, topic string) {
	for {
		fmt.Print("x y: ")
		var x, y int
		fmt.Scanf("%d %d", &x, &y)

		m := Multiply{
			X: x,
			Y: y,
		}
		jsonMsg, err := json.Marshal(m)
		if err != nil {
			log.Fatalln(err)
		}

		msg := sarama.ProducerMessage{
			Topic: topic,
			Value: sarama.ByteEncoder(jsonMsg),
		}
		partition, offset, err := producer.SendMessage(&msg)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("Sent msg to partition:", partition, ", offset:", offset)
	}
}
func newProducerMessage(cp ChainPartition, payload []byte) *sarama.ProducerMessage {
	return &sarama.ProducerMessage{
		Topic: cp.Topic(),
		// TODO Consider writing an IntEncoder?
		Key:   sarama.StringEncoder(strconv.Itoa(int(cp.Partition()))),
		Value: sarama.ByteEncoder(payload),
	}
}
func (i *IndeedKafkaProducer) SendMessages(jobResultChannel <-chan mapping.JobResult) (<-chan error, <-chan int) {
	errorChannel := make(chan error)
	kafkaDoneChannel := make(chan int)
	go func() {
		defer close(errorChannel)
		defer close(kafkaDoneChannel)
		defer i.Close()
		for jobResult := range jobResultChannel {
			if jobResult.IsLast() {
				eatonevents.Debug("received last jobResult. returning from function and signaling that the job is complete.")
				kafkaDoneChannel <- 0
				return
			}
			bytes, err := xml.Marshal(jobResult)
			if err != nil {
				errorChannel <- err
				continue
			}
			eatonevents.Debug(fmt.Sprintf("Sending JobResult JobKey: %s", jobResult.JobKey))
			i.producer.Input() <- &sarama.ProducerMessage{
				Topic: eatonconfig.KafkaTopic,
				Value: sarama.ByteEncoder(bytes),
				Key:   sarama.StringEncoder(jobResult.JobKey),
			}
		}
	}()
	return errorChannel, kafkaDoneChannel
}
func produceNToTopicPartition(t *testing.T, n int, topic string, partition int, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	partitionerFactory := &SaramaPartitionerFactory{NewFixedPartitioner}
	producerConfig.Partitioner = partitionerFactory.PartitionerConstructor
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	encoder := &Int32Encoder{}
	for i := 0; i < n; i++ {
		key, _ := encoder.Encode(uint32(partition))
		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   sarama.ByteEncoder(key),
			Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i)),
		}
	}

	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
func (b *Broker) addNewData(id uint64, ad *AddData) (int32, int64, error) {
	data := map[string]interface{}{
		"id":     id,
		"data":   ad.buffer.Bytes(),
		"method": gBrokerMethod,
	}

	wr := ad.msgpWriter
	err := wr.WriteIntf(data)
	if err != nil {
		return 0, 0, fmt.Errorf("fail to msgp.WriteIntf: %s", err.Error())
	}
	wr.Flush()

	ad.pmsg.Value = sarama.ByteEncoder(ad.msgpBuffer.Bytes())
	ad.pmsg.Metadata = ad
	ad.pmsg.Partition, err = b.getOneWritablePartition()
	if err != nil {
		return 0, 0, fmt.Errorf("fail to get one writable partition: %s", err.Error())
	}

	b.produceChan <- ad.pmsg
	<-ad.brokerDoneChan

	if ad.brokerErr != nil {
		return 0, 0, fmt.Errorf("fail to produce: %s", ad.brokerErr.Error())
	}
	return ad.pmsg.Partition, ad.pmsg.Offset, nil
}
func (k *Kafka) Send(message []byte) {
	k.pub.Input() <- &sarama.ProducerMessage{
		Topic: k.topic,
		Key:   nil,
		Value: sarama.ByteEncoder(message),
	}
}
// ElasticsearchMessageEncoder defines the encoding from SubscribeResponse to
// sarama.ProducerMessage for Elasticsearch
func ElasticsearchMessageEncoder(topic string, key sarama.Encoder, dataset string,
	message proto.Message) (*sarama.ProducerMessage, error) {
	response, ok := message.(*pb.SubscribeResponse)
	if !ok {
		return nil, UnhandledMessageError{message: message}
	}
	update := response.GetUpdate()
	if update == nil {
		return nil, UnhandledSubscribeResponseError{response: response}
	}
	updateMap, err := openconfig.NotificationToMap(dataset, update, elasticsearch.EscapeFieldName)
	if err != nil {
		return nil, err
	}
	// Convert time to ms to make Elasticsearch happy
	updateMap["timestamp"] = updateMap["timestamp"].(int64) / 1000000
	updateJSON, err := json.Marshal(updateMap)
	if err != nil {
		return nil, err
	}
	glog.V(9).Infof("kafka: %s", updateJSON)
	return &sarama.ProducerMessage{
		Topic:    topic,
		Key:      key,
		Value:    sarama.ByteEncoder(updateJSON),
		Metadata: kafka.Metadata{StartTime: time.Unix(0, update.Timestamp), NumMessages: 1},
	}, nil
}
func (producer AsyncProducer) Publish(message proto.Message) {
	topic := "test_topic"
	raw, _ := proto.Marshal(message)

	producer.delegatee.Input() <- &kafka.ProducerMessage{
		Topic: topic,
		Value: kafka.ByteEncoder(raw),
	}
}
/*
 * Publish a message into a topic. Will create the topic if it doesn't exist.
 */
func (p *Producer) Publish(topic string, msg []byte) {
	fmt.Println("[INFO] Publish on topic: ", topic)
	fmt.Println("[INFO] msg: ", string(msg))
	p.producer.Input() <- &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(msg),
	}
}
// SendByteMessage sends a byte slice message to kafka
func (self *Kafka) SendByteMessage(msg []byte, event_type string) (partition int32, offset int64, err error) {
	if _, ok := self.topic[event_type]; !ok {
		event_type = "default"
	}
	message := &sarama.ProducerMessage{Topic: self.topic[event_type], Partition: self.partition}
	message.Value = sarama.ByteEncoder(msg)
	return self.producer.SendMessage(message)
}
func (k *Peer) sendMessage(message []byte) error {
	msg := &sarama.ProducerMessage{
		Topic: topic,
		Key:   nil,
		Value: sarama.ByteEncoder(message),
	}
	_, _, err := k.producer.SendMessage(msg)
	return err
}
func (k *Peer) sendMessage(message []byte) error {
	select {
	case k.producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.ByteEncoder(message)}:
		return nil
	case err := <-k.producer.Errors():
		return err.Err
	}
}
func testNewConsumerMessage(cp ChainPartition, offset int64, kafkaMessage *ab.KafkaMessage) *sarama.ConsumerMessage {
	return &sarama.ConsumerMessage{
		Value:     sarama.ByteEncoder(utils.MarshalOrPanic(kafkaMessage)),
		Topic:     cp.Topic(),
		Partition: cp.Partition(),
		Offset:    offset,
	}
}
func (k *kafkaWriter) Write(val []byte) (int, error) {
	u1 := uuid.NewV4()
	_, _, err := k.s.SendMessage(&sarama.ProducerMessage{
		Topic: "important",
		Key:   sarama.StringEncoder(u1.String()),
		Value: sarama.ByteEncoder(val),
	})
	// io.Writer contract: report the full payload as written on success
	return len(val), err
}
// Log sends a spade Event to kafka.
func (l *KafkaLogger) Log(e *spade.Event) error {
	c, err := spade.Marshal(e)
	if err != nil {
		return err
	}
	hystrix.Go(hystrixCommandName, func() error {
		return l.Producer.SendMessage(sarama.StringEncoder(e.Uuid), sarama.ByteEncoder(c))
	}, nil)
	return nil
}
// PublishRaw will emit the byte array to the Kafka topic.
func (p *KafkaPublisher) PublishRaw(key string, m []byte) error {
	msg := &sarama.ProducerMessage{
		Topic: p.topic,
		Key:   sarama.StringEncoder(key),
		Value: sarama.ByteEncoder(m),
	}

	// TODO: do something with these partition/offset values
	_, _, err := p.producer.SendMessage(msg)
	return err
}
func (this *MirrorMaker) produceRoutine(producer *sarama.Producer, channelIndex int) {
	for msg := range this.messageChannels[channelIndex] {
		var key sarama.Encoder
		if !this.config.PreservePartitions {
			key = sarama.ByteEncoder(msg.Key)
		} else {
			key = Int32Encoder(msg.Partition)
		}
		producer.Input() <- &sarama.ProducerMessage{
			Topic: this.config.TopicPrefix + msg.Topic,
			Key:   key,
			Value: sarama.ByteEncoder(msg.Value),
		}
	}
}
func (k *kBroker) Publish(topic string, msg *broker.Message, opts ...broker.PublishOption) error {
	b, err := k.opts.Codec.Marshal(msg)
	if err != nil {
		return err
	}
	_, _, err = k.p.SendMessage(&sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(b),
	})
	return err
}
func pumpData(conf *Config, users chan models.User) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	producer, err := sarama.NewAsyncProducer(conf.Brokers, config)
	if err != nil {
		log.Fatalf("Can't create producer! Err: %v", err)
	}

	// Trap SIGINT to trigger a graceful shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var (
		wg                          sync.WaitGroup
		enqueued, successes, errors int
	)

	wg.Add(1)
	go func() {
		defer wg.Done()
		for range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			log.Println(err)
			errors++
		}
	}()

ProducerLoop:
	for user := range users {
		b, _ := json.Marshal(user)
		message := &sarama.ProducerMessage{Topic: conf.Topic, Value: sarama.ByteEncoder(b)}
		select {
		case producer.Input() <- message:
			enqueued++
		case <-signals:
			// AsyncClose is called once, after the loop, for both exit paths
			break ProducerLoop
		}
	}

	producer.AsyncClose() // Trigger a shutdown of the producer.
	wg.Wait()
	log.Printf("Successfully produced: %d; errors: %d", successes, errors)
}
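// For reference, a minimal sketch of the synchronous-producer setup most of
// the snippets above assume. This is an illustrative assumption, not code from
// any of the sources: the broker address, topic name, key, and payload below
// are placeholders, and error handling is abbreviated.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func exampleSend() error {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by SyncProducer
	config.Producer.RequiredAcks = sarama.WaitForAll

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		return err
	}
	defer producer.Close()

	// A non-nil Key is hashed to choose the partition; a nil Key falls back
	// to the configured (random by default) partitioner.
	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Key:   sarama.StringEncoder("example-key"),
		Value: sarama.ByteEncoder([]byte("example payload")),
	})
	if err != nil {
		return err
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)
	return nil
}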