func NewExclusivePartitioner(topic string) sarama.Partitioner {
	this := &exclusivePartitioner{
		hasher: sarama.NewHashPartitioner(topic),
	}
	return this
}
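// A minimal sketch of the exclusivePartitioner type the constructor above
// implies; only the hasher field is attested, and the method bodies below
// (straight delegation to the wrapped hash partitioner) are assumptions,
// not the original implementation.
type exclusivePartitioner struct {
	hasher sarama.Partitioner
}

// Partition delegates to the embedded hash partitioner, so messages with
// equal keys land on the same partition.
func (p *exclusivePartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	return p.hasher.Partition(msg, numPartitions)
}

// RequiresConsistency reports that the key-to-partition mapping must stay
// stable, matching sarama's hash partitioner.
func (p *exclusivePartitioner) RequiresConsistency() bool {
	return true
}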
func TestSuite(t *testing.T) {
	RegisterFailHandler(Fail)

	client, err := sarama.NewClient("sarama-cluster-client", []string{"127.0.0.1:29092"}, &sarama.ClientConfig{
		MetadataRetries: 30,
		WaitForElection: time.Second,
	})
	checkOrFail(t, err)
	defer client.Close()

	producer, err := sarama.NewProducer(client, &sarama.ProducerConfig{
		Partitioner:      sarama.NewHashPartitioner(),
		MaxBufferedBytes: 1024 * 1024,
		MaxBufferTime:    1000,
	})
	checkOrFail(t, err)
	defer producer.Close()

	// Seed the test topic (tnT is defined elsewhere in this suite) with
	// hash-partitioned messages before running the specs.
	for i := 0; i < 10000; i++ {
		checkOrFail(t, producer.SendMessage(tnT, nil, sarama.ByteEncoder([]byte("PLAINDATA"))))
	}

	RunSpecs(t, "sarama/cluster")
}
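// A plausible definition of the checkOrFail helper used above (an
// assumption; the suite's real helper may differ): abort the whole test
// run on any setup error.
func checkOrFail(t *testing.T, err error) {
	if err != nil {
		t.Fatal(err)
	}
}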
func (k *KafkaOutput) Init(config interface{}) (err error) {
	k.config = config.(*KafkaOutputConfig)
	if len(k.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}

	// Client-level settings.
	k.cconfig = sarama.NewClientConfig()
	k.cconfig.MetadataRetries = k.config.MetadataRetries
	k.cconfig.WaitForElection = time.Duration(k.config.WaitForElection) * time.Millisecond
	k.cconfig.BackgroundRefreshFrequency = time.Duration(k.config.BackgroundRefreshFrequency) * time.Millisecond

	// Per-broker connection settings.
	k.cconfig.DefaultBrokerConf = sarama.NewBrokerConfig()
	k.cconfig.DefaultBrokerConf.MaxOpenRequests = k.config.MaxOpenRequests
	k.cconfig.DefaultBrokerConf.DialTimeout = time.Duration(k.config.DialTimeout) * time.Millisecond
	k.cconfig.DefaultBrokerConf.ReadTimeout = time.Duration(k.config.ReadTimeout) * time.Millisecond
	k.cconfig.DefaultBrokerConf.WriteTimeout = time.Duration(k.config.WriteTimeout) * time.Millisecond

	// Producer settings; only the Hash partitioner accepts hash_variable.
	k.pconfig = sarama.NewProducerConfig()
	switch k.config.Partitioner {
	case "Random":
		k.pconfig.Partitioner = sarama.NewRandomPartitioner()
		if len(k.config.HashVariable) > 0 {
			return fmt.Errorf("hash_variable should not be set for the %s partitioner", k.config.Partitioner)
		}
	case "RoundRobin":
		k.pconfig.Partitioner = new(sarama.RoundRobinPartitioner)
		if len(k.config.HashVariable) > 0 {
			return fmt.Errorf("hash_variable should not be set for the %s partitioner", k.config.Partitioner)
		}
	case "Hash":
		k.pconfig.Partitioner = sarama.NewHashPartitioner()
		if k.hashVariable = verifyMessageVariable(k.config.HashVariable); k.hashVariable == nil {
			return fmt.Errorf("invalid hash_variable: %s", k.config.HashVariable)
		}
	default:
		return fmt.Errorf("invalid partitioner: %s", k.config.Partitioner)
	}

	// Exactly one of topic and topic_variable must be provided.
	if len(k.config.Topic) == 0 {
		if k.topicVariable = verifyMessageVariable(k.config.TopicVariable); k.topicVariable == nil {
			return fmt.Errorf("invalid topic_variable: %s", k.config.TopicVariable)
		}
	} else if len(k.config.TopicVariable) > 0 {
		return errors.New("topic and topic_variable cannot both be set")
	}

	switch k.config.RequiredAcks {
	case "NoResponse":
		k.pconfig.RequiredAcks = sarama.NoResponse
	case "WaitForLocal":
		k.pconfig.RequiredAcks = sarama.WaitForLocal
	case "WaitForAll":
		k.pconfig.RequiredAcks = sarama.WaitForAll
	default:
		return fmt.Errorf("invalid required_acks: %s", k.config.RequiredAcks)
	}
	k.pconfig.Timeout = time.Duration(k.config.Timeout) * time.Millisecond

	switch k.config.CompressionCodec {
	case "None":
		k.pconfig.Compression = sarama.CompressionNone
	case "GZIP":
		k.pconfig.Compression = sarama.CompressionGZIP
	case "Snappy":
		k.pconfig.Compression = sarama.CompressionSnappy
	default:
		return fmt.Errorf("invalid compression_codec: %s", k.config.CompressionCodec)
	}

	k.pconfig.MaxBufferedBytes = k.config.MaxBufferedBytes
	k.pconfig.MaxBufferTime = time.Duration(k.config.MaxBufferTime) * time.Millisecond
	k.pconfig.BackPressureThresholdBytes = k.config.BackPressureThresholdBytes

	k.client, err = sarama.NewClient(k.config.Id, k.config.Addrs, k.cconfig)
	if err != nil {
		return
	}
	k.producer, err = sarama.NewProducer(k.client, k.pconfig)
	return
}
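// A sketch of the KafkaOutputConfig consumed by Init above. The field set
// is read directly off the accesses in Init; the concrete types and toml
// tags are assumptions about how the plugin section would be decoded.
type KafkaOutputConfig struct {
	// Client options.
	Id                         string   `toml:"id"`
	Addrs                      []string `toml:"addrs"`
	MetadataRetries            int      `toml:"metadata_retries"`
	WaitForElection            uint32   `toml:"wait_for_election"`            // milliseconds
	BackgroundRefreshFrequency uint32   `toml:"background_refresh_frequency"` // milliseconds

	// Broker options.
	MaxOpenRequests int    `toml:"max_open_requests"`
	DialTimeout     uint32 `toml:"dial_timeout"`  // milliseconds
	ReadTimeout     uint32 `toml:"read_timeout"`  // milliseconds
	WriteTimeout    uint32 `toml:"write_timeout"` // milliseconds

	// Producer options.
	Partitioner                string `toml:"partitioner"` // Random, RoundRobin, or Hash
	HashVariable               string `toml:"hash_variable"`
	Topic                      string `toml:"topic"`
	TopicVariable              string `toml:"topic_variable"`
	RequiredAcks               string `toml:"required_acks"`
	Timeout                    uint32 `toml:"timeout"` // milliseconds
	CompressionCodec           string `toml:"compression_codec"`
	MaxBufferedBytes           uint32 `toml:"max_buffered_bytes"`
	MaxBufferTime              uint32 `toml:"max_buffer_time"` // milliseconds
	BackPressureThresholdBytes uint32 `toml:"back_pressure_threshold_bytes"`
}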
// NewKafkaDefaultPartitioner creates a partitioner that is partially
// compatible with kafka.producer.DefaultPartitioner:
//
// 1) For string, int32, and int64 keys, it uses kafka.producer.DefaultPartitioner's
//    algorithm, i.e. hashing with java.lang.Object.hashCode and taking the modulo.
//
// 2) For all other key types, it falls back to sarama.NewHashPartitioner's algorithm.
func NewKafkaDefaultPartitioner(topic string) sarama.Partitioner {
	p := sarama.NewHashPartitioner(topic)
	return &kafkaDefaultPartitioner{p}
}
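// A sketch of the kafkaDefaultPartitioner wrapper described above, showing
// the string-key case only; the int32/int64 cases and the exact key
// encodings are elided, and the helper names here are illustrative, not
// the original code.
type kafkaDefaultPartitioner struct {
	sarama.Partitioner // fallback for key types without special handling
}

func (p *kafkaDefaultPartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	if key, ok := msg.Key.(sarama.StringEncoder); ok {
		// kafka.producer.DefaultPartitioner: abs(hashCode(key)) % numPartitions,
		// where abs is the sign-bit mask used by Kafka's Utils.abs.
		return (javaStringHashCode(string(key)) & 0x7fffffff) % numPartitions, nil
	}
	return p.Partitioner.Partition(msg, numPartitions) // sarama's hash algorithm
}

// javaStringHashCode mimics java.lang.String.hashCode (h = 31*h + c over the
// string's code units); exact for BMP text, approximate for strings with
// non-BMP characters, which Java hashes as UTF-16 surrogate pairs.
func javaStringHashCode(s string) int32 {
	var h int32
	for _, c := range s {
		h = 31*h + int32(c)
	}
	return h
}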