func NewKafkaDeliver(store *Store, clientId string, brokerList []string) (*KafkaDeliver, error) {
	log.Println("go=kafka at=new-kafka-deliver")

	clientConfig := sarama.NewClientConfig()
	producerConfig := sarama.NewProducerConfig()

	client, err := sarama.NewClient(clientId, brokerList, clientConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-client")

	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-producer")

	return &KafkaDeliver{
		clientId:          clientId,
		brokerList:        brokerList,
		store:             store,
		producer:          producer,
		producerConfig:    producerConfig,
		client:            client,
		clientConfig:      clientConfig,
		deliverGoroutines: 8,
		shutdownDeliver:   make(chan bool, 8),
		shutdown:          make(chan bool, 8),
	}, nil
}
func produceNToTopicPartition(t *testing.T, n int, topic string, partition int, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	partitionerFactory := &SaramaPartitionerFactory{NewFixedPartitioner}
	producerConfig.Partitioner = partitionerFactory.PartitionerConstructor

	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	encoder := &Int32Encoder{}
	for i := 0; i < n; i++ {
		key, _ := encoder.Encode(uint32(partition))
		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   sarama.ByteEncoder(key),
			Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i)),
		}
	}

	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
func Produce(Quit chan bool, Host []string, Topic string, Data chan []byte) {
	client, err := sarama.NewClient("crontab_client", Host, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	defer client.Close()
	log.Println("kafka producer connected")

	cfg := sarama.NewProducerConfig()
	cfg.Partitioner = sarama.NewRoundRobinPartitioner

	producer, err := sarama.NewProducer(client, cfg)
	if err != nil {
		panic(err)
	}
	defer producer.Close()
	log.Println("kafka producer ready")

	for {
		select {
		case pack := <-Data:
			producer.Input() <- &sarama.MessageToSend{Topic: Topic, Key: nil, Value: sarama.ByteEncoder(pack)}
		case err := <-producer.Errors():
			log.Println(err)
		case <-Quit:
			// A bare break would only exit the select, not the loop;
			// return so the deferred Close calls run.
			return
		}
	}
}
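// A minimal caller sketch for Produce above: run it in a goroutine, feed the
// Data channel, then signal Quit. The broker address, topic, and payload are
// placeholders, not values from the original code.
func produceExample() {
	quit := make(chan bool)
	data := make(chan []byte)
	go Produce(quit, []string{"localhost:9092"}, "crontab", data)

	data <- []byte("hello kafka")
	quit <- true // stops the loop; the deferred Close calls then run
}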
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	pubClient, _ := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	subClient, _ := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())

	topic := "test"
	pub, _ := sarama.NewProducer(pubClient, sarama.NewProducerConfig())

	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, _ := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
func (this *MirrorMaker) startProducers() {
	for i := 0; i < this.config.NumProducers; i++ {
		conf, err := ProducerConfigFromFile(this.config.ProducerConfig)
		if err != nil {
			panic(err)
		}
		if err = conf.Validate(); err != nil {
			panic(err)
		}

		client, err := sarama.NewClient(conf.Clientid, conf.BrokerList, sarama.NewClientConfig())
		if err != nil {
			panic(err)
		}

		config := sarama.NewProducerConfig()
		config.ChannelBufferSize = conf.SendBufferSize
		switch strings.ToLower(conf.CompressionCodec) {
		case "none":
			config.Compression = sarama.CompressionNone
		case "gzip":
			config.Compression = sarama.CompressionGZIP
		case "snappy":
			config.Compression = sarama.CompressionSnappy
		}
		config.FlushByteCount = conf.FlushByteCount
		config.FlushFrequency = conf.FlushTimeout
		config.FlushMsgCount = conf.BatchSize
		config.MaxMessageBytes = conf.MaxMessageBytes
		config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
		if this.config.PreservePartitions {
			config.Partitioner = NewIntPartitioner
		} else {
			config.Partitioner = sarama.NewRandomPartitioner
		}
		config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
		config.RetryBackoff = conf.RetryBackoff
		config.Timeout = conf.Timeout

		producer, err := sarama.NewProducer(client, config)
		if err != nil {
			panic(err)
		}
		this.producers = append(this.producers, producer)
		if this.config.PreserveOrder {
			go this.produceRoutine(producer, i)
		} else {
			go this.produceRoutine(producer, 0)
		}
	}
}
// NewKafkaProducer creates a new producer that publishes messages to the given topic.
// It uses the default sarama producer configuration, with FlushMsgCount set to 1
// and AckSuccesses enabled.
func NewKafkaProducer(topic string, brokerList []string) *KafkaProducer {
	client, err := sarama.NewClient(uuid.New(), brokerList, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}

	config := sarama.NewProducerConfig()
	config.FlushMsgCount = 1
	config.AckSuccesses = true

	producer, err := sarama.NewProducer(client, config)
	if err != nil {
		panic(err)
	}
	return &KafkaProducer{topic, brokerList, client, producer}
}
func NewSaramaProducer(conf *ProducerConfig) Producer {
	if err := conf.Validate(); err != nil {
		panic(err)
	}

	client, err := sarama.NewClient(conf.Clientid, conf.BrokerList, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}

	config := sarama.NewProducerConfig()
	config.ChannelBufferSize = conf.SendBufferSize
	switch strings.ToLower(conf.CompressionCodec) {
	case "none":
		config.Compression = sarama.CompressionNone
	case "gzip":
		config.Compression = sarama.CompressionGZIP
	case "snappy":
		config.Compression = sarama.CompressionSnappy
	}
	config.FlushByteCount = conf.FlushByteCount
	config.FlushFrequency = conf.FlushTimeout
	config.FlushMsgCount = conf.BatchSize
	config.MaxMessageBytes = conf.MaxMessageBytes
	config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
	config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
	config.RetryBackoff = conf.RetryBackoff
	config.Timeout = conf.Timeout
	config.AckSuccesses = conf.AckSuccesses

	partitionerFactory := &SaramaPartitionerFactory{conf.Partitioner}
	config.Partitioner = partitionerFactory.PartitionerConstructor

	producer, err := sarama.NewProducer(client, config)
	if err != nil {
		panic(err)
	}

	saramaProducer := &SaramaProducer{
		saramaProducer: producer,
		config:         conf,
	}
	saramaProducer.initSuccesses()
	saramaProducer.initErrors()
	saramaProducer.initInput()

	return saramaProducer
}
// makeProducer constructs a kafka producer client.
// The producer is returned open; the caller is responsible for closing it.
func makeProducer(addr []string) (producer *sarama.Producer, err error) {
	clientConfig := &sarama.ClientConfig{MetadataRetries: 3, WaitForElection: 250 * time.Millisecond}
	client, err := sarama.NewClient("client_id", addr, clientConfig)
	if err != nil {
		return
	}
	producer, err = sarama.NewProducer(client, makeConfig())
	return
}
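// A caller sketch for makeProducer, assuming the synchronous SendMessage API
// used by the other examples from this vintage of sarama; the broker address,
// topic, and payload are placeholders.
func makeProducerExample() {
	producer, err := makeProducer([]string{"localhost:9092"})
	if err != nil {
		panic(err)
	}
	defer producer.Close() // the caller owns the producer's lifetime

	if err := producer.SendMessage("metrics", nil, sarama.StringEncoder("hello")); err != nil {
		panic(err)
	}
}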
// NewProducer creates a new Producer using the given client, topic and configuration.
func NewProducer(client *sarama.Client, topic string, config *sarama.ProducerConfig) (*Producer, error) {
	if topic == "" {
		return nil, sarama.ConfigurationError("Empty topic")
	}

	prod, err := sarama.NewProducer(client, config)
	if err != nil {
		return nil, err
	}

	sp := &Producer{
		producer:        prod,
		topic:           topic,
		newExpectations: make(chan *producerExpect), // this must be unbuffered
		client:          client,
	}
	return sp, nil
}
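// A construction sketch for the wrapper above, assuming an existing
// sarama.Client; the client id, broker address, and topic are placeholders.
func newProducerExample() {
	client, err := sarama.NewClient("my-client", []string{"localhost:9092"}, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	sp, err := NewProducer(client, "events", sarama.NewProducerConfig())
	if err != nil {
		panic(err)
	}
	_ = sp // the wrapper's methods are defined elsewhere in its package
}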
func main() {
	var fromStart = flag.Bool("fromStart", true, "Read from beginning of file")
	var topic = flag.String("topic", "tailf", "Kafka topic to produce to")
	var clientId = flag.String("clientId", "tailf-client", "Kafka client ID")
	var brokerList = flag.String("brokerList", "127.0.0.1:9092", "Kafka broker list, comma-delimited.")
	var verbose = flag.Bool("verbose", false, "Verbose output")
	flag.Parse()

	if len(flag.Args()) != 1 {
		flag.Usage()
		os.Exit(1)
	}
	var filename = flag.Arg(0)

	follower, err := tailf.Follow(filename, *fromStart)
	if err != nil {
		log.Fatalf("couldn't follow %q: %v", filename, err)
	}
	defer follower.Close()

	clientConfig := sarama.NewClientConfig()
	client, err := sarama.NewClient(*clientId, strings.Split(*brokerList, ","), clientConfig)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		panic(err)
	}
	defer producer.Close()

	scanner := bufio.NewScanner(follower)
	for scanner.Scan() {
		// Copy the scanned bytes: the async producer may still hold the value
		// after the next Scan overwrites the scanner's underlying buffer.
		line := append([]byte(nil), scanner.Bytes()...)
		producer.Input() <- &sarama.ProducerMessage{Topic: *topic, Key: nil, Value: sarama.ByteEncoder(line)}
		if *verbose {
			log.Println("Produced message:", string(line))
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalf("scanner error: %v", err)
	}
}
func setUpProducer(host string, port int, mode string) {
	connection := host + ":" + strconv.Itoa(port)
	log.Info("Connecting to Kafka on " + connection + "...")

	clientConfig := sarama.NewClientConfig()
	clientConfig.WaitForElection = 10 * time.Second

	client, err := sarama.NewClient("client_id", []string{connection}, clientConfig)
	if err != nil {
		panic(err)
	}
	log.Info("Connection to Kafka successful")

	// Create a producer with some specific settings.
	producerConfig := sarama.NewProducerConfig()
	// If delivering messages async, buffer them for at most MaxBufferTime.
	producerConfig.MaxBufferTime = 2 * time.Second
	// Max bytes in buffer.
	producerConfig.MaxBufferedBytes = 51200
	// No compression.
	producerConfig.Compression = sarama.CompressionNone
	// We are just streaming metrics, so don't wait for any Kafka acks.
	producerConfig.RequiredAcks = sarama.NoResponse

	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		panic(err)
	}

	go pushMetrics(producer, mode)
}
func produceN(t *testing.T, n int, topic string, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producer, err := sarama.NewProducer(client, sarama.NewProducerConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	for i := 0; i < n; i++ {
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}

	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
func (s *scribeServiceImplementation) Log(messages []*scribe.LogEntry) (scribe.ResultCode, error) {
	client, err := sarama.NewClient("client_id", []string{kafka_hostname}, &sarama.ClientConfig{MetadataRetries: 1, WaitForElection: 250 * time.Millisecond})
	if err != nil {
		log.Println(err)
		return scribe.ResultCodeTryLater, err
	}
	log.Printf("Connected to %s\n", kafka_hostname)
	defer client.Close()

	producer, err := sarama.NewProducer(client, &sarama.ProducerConfig{
		RequiredAcks:     sarama.WaitForLocal,
		MaxBufferTime:    uint32(buffer_time),
		MaxBufferedBytes: uint32((len(messages[0].Message) * buffer_size) - 1),
	})
	if err != nil {
		log.Println(err)
		return scribe.ResultCodeTryLater, err
	}
	defer producer.Close()

	success := 0
	errors := 0
	startTime := time.Now()
	for _, m := range messages {
		log.Printf("Message Received: %+v\n", m)
		if err = producer.SendMessage(m.Category, nil, sarama.StringEncoder(m.Message)); err != nil {
			errors++
			log.Println(err)
		} else {
			success++
		}
	}
	endTime := time.Now()
	log.Printf("Sent %d messages in %d ms with %d errors", success, endTime.Sub(startTime)/time.Millisecond, errors)

	if errors > 0 {
		return scribe.ResultCodeTryLater, nil
	}
	return scribe.ResultCodeOk, nil
}
func main() {
	client, err := kafka.NewClient("client_id", []string{"localhost:9092"}, kafka.NewClientConfig())
	if err != nil {
		panic(err)
	}
	defer client.Close()
	fmt.Println("> connected")

	producer, err := kafka.NewProducer(client, nil)
	if err != nil {
		panic(err)
	}
	defer producer.Close()

	err = producer.SendMessage("my_topic", nil, kafka.StringEncoder("testing 123"))
	if err != nil {
		panic(err)
	}
	fmt.Println("> message sent")
}
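// For contrast with the synchronous SendMessage call above, a sketch of the
// channel-based Input()/Errors() API used by several other examples here
// (a newer sarama vintage; topic, payload, and timeout are placeholders).
func produceAsyncExample(producer *kafka.Producer, topic string) {
	producer.Input() <- &kafka.ProducerMessage{Topic: topic, Key: nil, Value: kafka.StringEncoder("testing 456")}

	select {
	case err := <-producer.Errors():
		panic(err)
	case <-time.After(5 * time.Second):
		// treat silence as success, as the test helpers above do
	}
}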
func (this *SyslogProducer) startProducers() {
	for i := 0; i < this.config.NumProducers; i++ {
		conf := this.config.ProducerConfig
		brokerList := strings.Split(this.config.BrokerList, ",")
		client, err := sarama.NewClient(conf.Clientid, brokerList, sarama.NewClientConfig())
		if err != nil {
			panic(err)
		}

		config := sarama.NewProducerConfig()
		config.ChannelBufferSize = conf.SendBufferSize
		switch strings.ToLower(conf.CompressionCodec) {
		case "none":
			config.Compression = sarama.CompressionNone
		case "gzip":
			config.Compression = sarama.CompressionGZIP
		case "snappy":
			config.Compression = sarama.CompressionSnappy
		}
		config.FlushByteCount = conf.FlushByteCount
		config.FlushFrequency = conf.FlushTimeout
		config.FlushMsgCount = conf.BatchSize
		config.MaxMessageBytes = conf.MaxMessageBytes
		config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
		config.Partitioner = sarama.NewRandomPartitioner
		config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
		config.RetryBackoff = conf.RetryBackoff
		config.Timeout = conf.Timeout

		Tracef(this, "Starting new producer with config: %#v", config)
		producer, err := sarama.NewProducer(client, config)
		if err != nil {
			panic(err)
		}
		this.producers = append(this.producers, producer)
		go this.produceRoutine(producer)
	}
}
func produce(t *testing.T, messages []string, topic string, brokerAddr string, compression sarama.CompressionCodec) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	producerConfig.Compression = compression
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	for _, message := range messages {
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(message)}
	}

	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)

	client, err := sarama.NewClient("sarama-cluster-client", []string{"127.0.0.1:29092"}, &sarama.ClientConfig{
		MetadataRetries: 30,
		WaitForElection: time.Second,
	})
	checkOrFail(t, err)
	defer client.Close()

	producer, err := sarama.NewProducer(client, &sarama.ProducerConfig{
		Partitioner:      sarama.NewHashPartitioner(),
		MaxBufferedBytes: 1024 * 1024,
		MaxBufferTime:    1000,
	})
	checkOrFail(t, err)
	defer producer.Close()

	for i := 0; i < 10000; i++ {
		checkOrFail(t, producer.SendMessage(tnT, nil, sarama.ByteEncoder([]byte("PLAINDATA"))))
	}

	RunSpecs(t, "sarama/cluster")
}
func (k *KafkaOutput) Init(config interface{}) (err error) {
	k.config = config.(*KafkaOutputConfig)
	if len(k.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}

	k.cconfig = sarama.NewClientConfig()
	k.cconfig.MetadataRetries = k.config.MetadataRetries
	k.cconfig.WaitForElection = time.Duration(k.config.WaitForElection) * time.Millisecond
	k.cconfig.BackgroundRefreshFrequency = time.Duration(k.config.BackgroundRefreshFrequency) * time.Millisecond

	k.cconfig.DefaultBrokerConf = sarama.NewBrokerConfig()
	k.cconfig.DefaultBrokerConf.MaxOpenRequests = k.config.MaxOpenRequests
	k.cconfig.DefaultBrokerConf.DialTimeout = time.Duration(k.config.DialTimeout) * time.Millisecond
	k.cconfig.DefaultBrokerConf.ReadTimeout = time.Duration(k.config.ReadTimeout) * time.Millisecond
	k.cconfig.DefaultBrokerConf.WriteTimeout = time.Duration(k.config.WriteTimeout) * time.Millisecond

	k.pconfig = sarama.NewProducerConfig()
	switch k.config.Partitioner {
	case "Random":
		k.pconfig.Partitioner = sarama.NewRandomPartitioner()
		if len(k.config.HashVariable) > 0 {
			return fmt.Errorf("hash_variable should not be set for the %s partitioner", k.config.Partitioner)
		}
	case "RoundRobin":
		k.pconfig.Partitioner = new(sarama.RoundRobinPartitioner)
		if len(k.config.HashVariable) > 0 {
			return fmt.Errorf("hash_variable should not be set for the %s partitioner", k.config.Partitioner)
		}
	case "Hash":
		k.pconfig.Partitioner = sarama.NewHashPartitioner()
		if k.hashVariable = verifyMessageVariable(k.config.HashVariable); k.hashVariable == nil {
			return fmt.Errorf("invalid hash_variable: %s", k.config.HashVariable)
		}
	default:
		return fmt.Errorf("invalid partitioner: %s", k.config.Partitioner)
	}

	if len(k.config.Topic) == 0 {
		if k.topicVariable = verifyMessageVariable(k.config.TopicVariable); k.topicVariable == nil {
			return fmt.Errorf("invalid topic_variable: %s", k.config.TopicVariable)
		}
	} else if len(k.config.TopicVariable) > 0 {
		return errors.New("topic and topic_variable cannot both be set")
	}

	switch k.config.RequiredAcks {
	case "NoResponse":
		k.pconfig.RequiredAcks = sarama.NoResponse
	case "WaitForLocal":
		k.pconfig.RequiredAcks = sarama.WaitForLocal
	case "WaitForAll":
		k.pconfig.RequiredAcks = sarama.WaitForAll
	default:
		return fmt.Errorf("invalid required_acks: %s", k.config.RequiredAcks)
	}

	k.pconfig.Timeout = time.Duration(k.config.Timeout) * time.Millisecond

	switch k.config.CompressionCodec {
	case "None":
		k.pconfig.Compression = sarama.CompressionNone
	case "GZIP":
		k.pconfig.Compression = sarama.CompressionGZIP
	case "Snappy":
		k.pconfig.Compression = sarama.CompressionSnappy
	default:
		return fmt.Errorf("invalid compression_codec: %s", k.config.CompressionCodec)
	}

	k.pconfig.MaxBufferedBytes = k.config.MaxBufferedBytes
	k.pconfig.MaxBufferTime = time.Duration(k.config.MaxBufferTime) * time.Millisecond
	k.pconfig.BackPressureThresholdBytes = k.config.BackPressureThresholdBytes

	k.client, err = sarama.NewClient(k.config.Id, k.config.Addrs, k.cconfig)
	if err != nil {
		return
	}
	k.producer, err = sarama.NewProducer(k.client, k.pconfig)
	return
}
func main() { fmt.Println(("Starting Producer")) runtime.GOMAXPROCS(runtime.NumCPU()) numMessage := 0 brokerConnect, topic, sleepTime, graphiteConnect, graphiteFlushInterval, flushMsgCount, flushFrequency, producerCount, maxMessagesPerReq := resolveConfig() _ = graphiteConnect _ = graphiteFlushInterval startMetrics(graphiteConnect, graphiteFlushInterval) produceRate := metrics.NewRegisteredMeter("ProduceRate", metrics.DefaultRegistry) //kafkaClient.CreateMultiplePartitionsTopic(zkConnect, topic, numPartitions) //p := producer.NewKafkaProducer(topic, []string{brokerConnect}) //defer producer.Close() //defer p.Close() saramaError := make(chan *sarama.ProducerError) saramaSuccess := make(chan *sarama.ProducerMessage) for i := 0; i < producerCount; i++ { client, err := sarama.NewClient(uuid.New(), []string{brokerConnect}, sarama.NewClientConfig()) if err != nil { panic(err) } config := sarama.NewProducerConfig() config.FlushMsgCount = flushMsgCount config.FlushFrequency = flushFrequency config.AckSuccesses = true config.RequiredAcks = sarama.NoResponse //WaitForAll config.MaxMessagesPerReq = maxMessagesPerReq config.Timeout = 1000 * time.Millisecond // config.Compression = 2 producer, err := sarama.NewProducer(client, config) go func() { if err != nil { panic(err) } for { message := &sarama.ProducerMessage{Topic: topic, Key: sarama.StringEncoder(fmt.Sprintf("%d", numMessage)), Value: sarama.StringEncoder(fmt.Sprintf("message %d!", numMessage))} numMessage++ producer.Input() <- message time.Sleep(sleepTime) } }() go func() { for { select { case error := <-producer.Errors(): saramaError <- error case success := <-producer.Successes(): saramaSuccess <- success } } }() } ctrlc := make(chan os.Signal, 1) signal.Notify(ctrlc, os.Interrupt) go func() { start := time.Now() count := 0 for { select { case err := <-saramaError: fmt.Println(err) case <-saramaSuccess: produceRate.Mark(1) count++ elapsed := time.Since(start) if elapsed.Seconds() >= 1 { fmt.Println(fmt.Sprintf("Per Second %d", count)) count = 0 start = time.Now() } } } }() <-ctrlc }