Example #1
func NewKafkaDeliver(store *Store, clientId string, brokerList []string) (*KafkaDeliver, error) {
	log.Println("go=kafka at=new-kafka-deliver")
	clientConfig := sarama.NewClientConfig()
	producerConfig := sarama.NewProducerConfig()

	client, err := sarama.NewClient(clientId, brokerList, clientConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-client")

	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-producer")

	return &KafkaDeliver{
		clientId:          clientId,
		brokerList:        brokerList,
		store:             store,
		producer:          producer,
		producerConfig:    producerConfig,
		client:            client,
		clientConfig:      clientConfig,
		deliverGoroutines: 8,
		shutdownDeliver:   make(chan bool, 8),
		shutdown:          make(chan bool, 8),
	}, nil
}
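The composite literal above pins down the shape of the receiver type. A minimal sketch of the KafkaDeliver struct it implies, with field types inferred from this vintage of the sarama API (the pointer types are assumptions, not taken from the source):

// Sketch of KafkaDeliver, reconstructed from the composite literal above.
// Field types are inferred from the pre-1.0 sarama API and may differ
// from the original definition.
type KafkaDeliver struct {
	clientId          string
	brokerList        []string
	store             *Store
	producer          *sarama.Producer
	producerConfig    *sarama.ProducerConfig
	client            *sarama.Client
	clientConfig      *sarama.ClientConfig
	deliverGoroutines int
	shutdownDeliver   chan bool
	shutdown          chan bool
}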
Example #2
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	pubClient, _ := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	subClient, _ := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())

	topic := "test"
	pub, _ := sarama.NewProducer(pubClient, sarama.NewProducerConfig())
	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, _ := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
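In this vintage of sarama the consumer hands messages over on an Events() channel. A hedged sketch of draining the sub consumer created above, assuming the pre-1.0 Events()/ConsumerEvent API:

// Sketch: drain the consumer created above. Error events arrive in-band
// on the same channel (assumes the pre-1.0 sarama consumer API).
go func() {
	for event := range sub.Events() {
		if event.Err != nil {
			log.Println("consume error:", event.Err)
			continue
		}
		// hand event.Value to the benchmark handler here
	}
}()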
Example #3
// NewKafkaLogger creates a KafkaLogger for the given Kafka cluster. We identify ourselves to the brokers with clientId.
func NewKafkaLogger(clientId string, brokers []string) (request_handler.SpadeEdgeLogger, error) {
	c, err := sarama.NewClient(clientId, brokers, sarama.NewClientConfig())
	if err != nil {
		return nil, err
	}

	config := sarama.NewProducerConfig()
	config.Partitioner = sarama.NewRoundRobinPartitioner
	config.FlushFrequency = 500 * time.Millisecond
	config.FlushMsgCount = 1000
	// Might want to try out compression
	config.Compression = sarama.CompressionNone
	config.AckSuccesses = true

	p, err := NewProducer(c, GetTopic(), config)
	if err != nil {
		return nil, err
	}

	k := &KafkaLogger{
		Producer: p,
	}
	hystrix.ConfigureCommand(hystrixCommandName, hystrix.CommandConfig{
		Timeout:               1000, // milliseconds
		MaxConcurrentRequests: hystrixConcurrencyLevel,
		ErrorPercentThreshold: 10,
	})
	return k, nil
}
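The command registered with ConfigureCommand is meant to be invoked through hystrix-go's Do. A hedged sketch of guarding a produce call with it; produceOnce is a hypothetical stand-in for the logger's actual send path:

// Hypothetical usage of the hystrix command configured above.
// produceOnce stands in for the real Kafka send.
if err := hystrix.Do(hystrixCommandName, func() error {
	return produceOnce() // an error here counts toward ErrorPercentThreshold
}, func(err error) error {
	return err // fallback runs when the circuit is open or the call fails
}); err != nil {
	log.Println("log dropped:", err)
}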
Example #4
func Produce(Quit chan bool, Host []string, Topic string, Data chan []byte) {
	client, err := sarama.NewClient("crontab_client", Host, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	} else {
		log.Println("kafka producer connected")
	}
	defer client.Close()

	cfg := sarama.NewProducerConfig()
	cfg.Partitioner = sarama.NewRoundRobinPartitioner
	producer, err := sarama.NewProducer(client, cfg)
	if err != nil {
		panic(err)
	}
	defer producer.Close()
	log.Println("kafka producer ready")

	for {
		select {
		case pack := <-Data:
			producer.Input() <- &sarama.MessageToSend{Topic: Topic, Key: nil, Value: sarama.ByteEncoder(pack)}
		case err := <-producer.Errors():
			log.Println(err)
		case <-Quit:
			// A plain break would only exit the select, not the loop;
			// returning also runs the deferred Close calls.
			return
		}
	}
}
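A sketch of a caller for Produce, with the broker address and topic as placeholder values:

// Hypothetical caller: run Produce in the background, feed it payloads,
// then signal shutdown via the quit channel.
data := make(chan []byte)
quit := make(chan bool)
go Produce(quit, []string{"localhost:9092"}, "crontab", data)

data <- []byte("job ran") // queue one payload
quit <- true              // the loop returns, running its deferred Closes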
Example #5
func produceNToTopicPartition(t *testing.T, n int, topic string, partition int, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	partitionerFactory := &SaramaPartitionerFactory{NewFixedPartitioner}
	producerConfig.Partitioner = partitionerFactory.PartitionerConstructor
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()
	encoder := &Int32Encoder{}
	for i := 0; i < n; i++ {
		key, _ := encoder.Encode(uint32(partition))
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: sarama.ByteEncoder(key), Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
Example #6
func testSarama(topic string, partition int32, seconds int) {
	stop := false

	config := sarama.NewClientConfig()
	client, err := sarama.NewClient("siesta", []string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}

	messageChannel := make(chan *sarama.MessageSet, 10000)
	count := 0
	go func() {
		for {
			set := <-messageChannel
			count += len(set.Messages)
		}
	}()

	broker, err := client.Leader(topic, partition)
	if err != nil {
		panic(err)
	}

	//warm up
	fmt.Println("warming up")
	for i := 0; i < 5; i++ {
		fetchRequest := new(sarama.FetchRequest)
		fetchRequest.MinBytes = 1
		fetchRequest.MaxWaitTime = 100
		fetchRequest.AddBlock(topic, partition, 0, 500)

		broker.Fetch("siesta", fetchRequest)
	}
	fmt.Println("warm up finished, starting")

	go func() {
		time.Sleep(time.Duration(seconds) * time.Second)
		stop = true
	}()

	offset := int64(0)
	for !stop {
		fetchRequest := new(sarama.FetchRequest)
		fetchRequest.MinBytes = 1
		fetchRequest.MaxWaitTime = 100
		fetchRequest.AddBlock(topic, partition, offset, 500)

		response, err := broker.Fetch("siesta", fetchRequest)
		if err != nil {
			panic(err)
		}
		set := response.Blocks[topic][partition].MsgSet
		if len(set.Messages) == 0 {
			continue // an empty fetch would otherwise panic on the index below
		}
		messageChannel <- &set
		offset = set.Messages[len(set.Messages)-1].Offset
	}

	fmt.Printf("%d within %d secnods\n", count, seconds)
	fmt.Printf("%d average\n", count/seconds)
}
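One caveat: the stop flag is written by the timer goroutine and read by the fetch loop with no synchronization, and count is shared the same way. A sketch of the same shutdown signal made race-free with sync/atomic:

// Sketch: a race-free replacement for the bool flag, using sync/atomic.
var stop int32

go func() {
	time.Sleep(time.Duration(seconds) * time.Second)
	atomic.StoreInt32(&stop, 1)
}()

for atomic.LoadInt32(&stop) == 0 {
	// fetch loop unchanged
}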
Example #7
/*
Gomkafka initializes a Kafka client and producer based on the given KafkaConfig.
*/
func Gomkafka(config *KafkaConfig) (*sarama.Client, *sarama.SimpleProducer, error) {
	client, err := sarama.NewClient(config.ClientID, config.Hosts, sarama.NewClientConfig())
	if err != nil {
		return nil, nil, err
	}

	producer, err := sarama.NewSimpleProducer(client, nil)
	if err != nil {
		return nil, nil, err
	}

	return client, producer, nil
}
Example #8
func (this *MirrorMaker) startProducers() {
	for i := 0; i < this.config.NumProducers; i++ {
		conf, err := ProducerConfigFromFile(this.config.ProducerConfig)
		if err != nil {
			panic(err)
		}
		if err = conf.Validate(); err != nil {
			panic(err)
		}

		client, err := sarama.NewClient(conf.Clientid, conf.BrokerList, sarama.NewClientConfig())
		if err != nil {
			panic(err)
		}

		config := sarama.NewProducerConfig()
		config.ChannelBufferSize = conf.SendBufferSize
		switch strings.ToLower(conf.CompressionCodec) {
		case "none":
			config.Compression = sarama.CompressionNone
		case "gzip":
			config.Compression = sarama.CompressionGZIP
		case "snappy":
			config.Compression = sarama.CompressionSnappy
		}
		config.FlushByteCount = conf.FlushByteCount
		config.FlushFrequency = conf.FlushTimeout
		config.FlushMsgCount = conf.BatchSize
		config.MaxMessageBytes = conf.MaxMessageBytes
		config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
		if this.config.PreservePartitions {
			config.Partitioner = NewIntPartitioner
		} else {
			config.Partitioner = sarama.NewRandomPartitioner
		}
		config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
		config.RetryBackoff = conf.RetryBackoff
		config.Timeout = conf.Timeout

		producer, err := sarama.NewProducer(client, config)
		if err != nil {
			panic(err)
		}
		this.producers = append(this.producers, producer)
		if this.config.PreserveOrder {
			go this.produceRoutine(producer, i)
		} else {
			go this.produceRoutine(producer, 0)
		}
	}
}
Example #9
// NewKafkaProducer creates a new producer that publishes messages to the given topic.
// It builds its own client and producer configuration and panics if either cannot be created.
func NewKafkaProducer(topic string, brokerList []string) *KafkaProducer {
	client, err := sarama.NewClient(uuid.New(), brokerList, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}

	config := sarama.NewProducerConfig()
	config.FlushMsgCount = 1
	config.AckSuccesses = true
	producer, err := sarama.NewProducer(client, config)
	if err != nil {
		panic(err)
	}

	return &KafkaProducer{topic, brokerList, client, producer}
}
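Because AckSuccesses is set, every delivered message emits an event on Successes() that something must read, or the producer eventually stalls. A hedged sketch of a send-that-waits helper; the field names on KafkaProducer are assumptions, since the literal above is positional:

// Sketch: send one message and wait for its ack. The field names
// (topic, producer) are assumed from the positional literal above.
func (p *KafkaProducer) sendAndWait(value string) error {
	p.producer.Input() <- &sarama.ProducerMessage{Topic: p.topic, Value: sarama.StringEncoder(value)}
	select {
	case <-p.producer.Successes():
		return nil
	case perr := <-p.producer.Errors():
		return perr.Err
	}
}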
Example #10
func NewSaramaProducer(conf *ProducerConfig) Producer {
	if err := conf.Validate(); err != nil {
		panic(err)
	}

	client, err := sarama.NewClient(conf.Clientid, conf.BrokerList, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}

	config := sarama.NewProducerConfig()
	config.ChannelBufferSize = conf.SendBufferSize
	switch strings.ToLower(conf.CompressionCodec) {
	case "none":
		config.Compression = sarama.CompressionNone
	case "gzip":
		config.Compression = sarama.CompressionGZIP
	case "snappy":
		config.Compression = sarama.CompressionSnappy
	}
	config.FlushByteCount = conf.FlushByteCount
	config.FlushFrequency = conf.FlushTimeout
	config.FlushMsgCount = conf.BatchSize
	config.MaxMessageBytes = conf.MaxMessageBytes
	config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
	config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
	config.RetryBackoff = conf.RetryBackoff
	config.Timeout = conf.Timeout
	config.AckSuccesses = conf.AckSuccesses

	partitionerFactory := &SaramaPartitionerFactory{conf.Partitioner}
	config.Partitioner = partitionerFactory.PartitionerConstructor

	producer, err := sarama.NewProducer(client, config)
	if err != nil {
		panic(err)
	}
	saramaProducer := &SaramaProducer{
		saramaProducer: producer,
		config:         conf,
	}
	saramaProducer.initSuccesses()
	saramaProducer.initErrors()
	saramaProducer.initInput()

	return saramaProducer
}
Example #11
func main() {
	var fromStart = flag.Bool("fromStart", true, "Read from beginning of file")
	var topic = flag.String("topic", "tailf", "Kafka topic to produce to")
	var clientId = flag.String("clientId", "tailf-client", "Kafka client ID")
	var brokerList = flag.String("brokerList", "127.0.0.1:9092", "Kafka broker list, comma-delimited.")
	var verbose = flag.Bool("verbose", false, "Verbose output")
	flag.Parse()
	if len(flag.Args()) != 1 {
		flag.Usage()
		os.Exit(1)
	}
	var filename = flag.Arg(0)

	follower, err := tailf.Follow(filename, *fromStart)
	if err != nil {
		log.Fatalf("couldn't follow %q: %v", filename, err)
	}
	defer follower.Close()

	clientConfig := sarama.NewClientConfig()
	client, err := sarama.NewClient(*clientId, strings.Split(*brokerList, ","), clientConfig)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		panic(err)
	}
	defer producer.Close()

	scanner := bufio.NewScanner(follower)
	for scanner.Scan() {
		producer.Input() <- &sarama.ProducerMessage{Topic: *topic, Key: nil, Value: sarama.ByteEncoder(scanner.Bytes())}
		if *verbose {
			log.Println("Produced message:", scanner.Text())
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatalf("scanner error: %v", err)
	}
}
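Nothing in main reads producer.Errors(), so once that channel's buffer fills, failed deliveries can back up the producer. A sketch of a drain goroutine to start before the scan loop:

// Sketch: drain delivery errors so the producer's error channel never
// fills up silently.
go func() {
	for err := range producer.Errors() {
		log.Println("produce error:", err)
	}
}()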
Example #12
func setUpProducer(host string, port int, mode string) {

	connection := host + ":" + strconv.Itoa(port)

	log.Info("Connecting to Kafka on " + connection + "...")

	clientConfig := sarama.NewClientConfig()
	clientConfig.WaitForElection = 10 * time.Second

	client, err := sarama.NewClient("client_id", []string{connection}, clientConfig)
	if err != nil {
		panic(err)
	} else {
		log.Info("Connection to Kafka successful")
	}

	// Create a producer with some specific settings.
	producerConfig := sarama.NewProducerConfig()

	// If delivering messages asynchronously, buffer them for at most MaxBufferTime.
	producerConfig.MaxBufferTime = 2 * time.Second

	// Max bytes in buffer.
	producerConfig.MaxBufferedBytes = 51200

	// No compression (sarama.CompressionNone).
	producerConfig.Compression = sarama.CompressionNone

	// We are just streaming metrics, so don't wait for any Kafka acks.
	producerConfig.RequiredAcks = sarama.NoResponse

	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		panic(err)
	}

	go pushMetrics(producer, mode)

}
Example #13
func produceN(t *testing.T, n int, topic string, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producer, err := sarama.NewProducer(client, sarama.NewProducerConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()
	for i := 0; i < n; i++ {
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
Example #14
func main() {
	client, err := kafka.NewClient("client_id", []string{"localhost:9092"}, kafka.NewClientConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> connected")
	}
	defer client.Close()

	producer, err := kafka.NewProducer(client, nil)
	if err != nil {
		panic(err)
	}
	defer producer.Close()

	err = producer.SendMessage("my_topic", nil, kafka.StringEncoder("testing 123"))
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> message sent")
	}
}
Example #15
func (this *SyslogProducer) startProducers() {
	for i := 0; i < this.config.NumProducers; i++ {
		conf := this.config.ProducerConfig
		brokerList := strings.Split(this.config.BrokerList, ",")
		client, err := sarama.NewClient(conf.Clientid, brokerList, sarama.NewClientConfig())
		if err != nil {
			panic(err)
		}

		config := sarama.NewProducerConfig()
		config.ChannelBufferSize = conf.SendBufferSize
		switch strings.ToLower(conf.CompressionCodec) {
		case "none":
			config.Compression = sarama.CompressionNone
		case "gzip":
			config.Compression = sarama.CompressionGZIP
		case "snappy":
			config.Compression = sarama.CompressionSnappy
		}
		config.FlushByteCount = conf.FlushByteCount
		config.FlushFrequency = conf.FlushTimeout
		config.FlushMsgCount = conf.BatchSize
		config.MaxMessageBytes = conf.MaxMessageBytes
		config.MaxMessagesPerReq = conf.MaxMessagesPerRequest
		config.Partitioner = sarama.NewRandomPartitioner
		config.RequiredAcks = sarama.RequiredAcks(conf.Acks)
		config.RetryBackoff = conf.RetryBackoff
		config.Timeout = conf.Timeout

		Tracef(this, "Starting new producer with config: %#v", config)
		producer, err := sarama.NewProducer(client, config)
		if err != nil {
			panic(err)
		}
		this.producers = append(this.producers, producer)
		go this.produceRoutine(producer)
	}
}
Example #16
func produce(t *testing.T, messages []string, topic string, brokerAddr string, compression sarama.CompressionCodec) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	producerConfig.Compression = compression
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()
	for _, message := range messages {
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: nil, Value: sarama.StringEncoder(message)}
	}
	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
Example #17
func main() {
	fmt.Println(("Starting Producer"))
	runtime.GOMAXPROCS(runtime.NumCPU())
	numMessage := 0

	brokerConnect, topic, sleepTime, graphiteConnect, graphiteFlushInterval, flushMsgCount, flushFrequency, producerCount, maxMessagesPerReq := resolveConfig()

	startMetrics(graphiteConnect, graphiteFlushInterval)
	produceRate := metrics.NewRegisteredMeter("ProduceRate", metrics.DefaultRegistry)

	//kafkaClient.CreateMultiplePartitionsTopic(zkConnect, topic, numPartitions)

	//p := producer.NewKafkaProducer(topic, []string{brokerConnect})

	//defer producer.Close()
	//defer p.Close()

	saramaError := make(chan *sarama.ProducerError)
	saramaSuccess := make(chan *sarama.ProducerMessage)

	for i := 0; i < producerCount; i++ {
		client, err := sarama.NewClient(uuid.New(), []string{brokerConnect}, sarama.NewClientConfig())
		if err != nil {
			panic(err)
		}

		config := sarama.NewProducerConfig()
		config.FlushMsgCount = flushMsgCount
		config.FlushFrequency = flushFrequency
		config.AckSuccesses = true
		config.RequiredAcks = sarama.NoResponse // or sarama.WaitForAll to wait on all replicas
		config.MaxMessagesPerReq = maxMessagesPerReq
		config.Timeout = 1000 * time.Millisecond
		//		config.Compression = 2
		producer, err := sarama.NewProducer(client, config)
		if err != nil {
			panic(err)
		}
		go func() {
			for {
				// numMessage is shared by all producer goroutines without
				// synchronization, so keys may repeat when producerCount > 1.
				message := &sarama.ProducerMessage{Topic: topic, Key: sarama.StringEncoder(fmt.Sprintf("%d", numMessage)), Value: sarama.StringEncoder(fmt.Sprintf("message %d!", numMessage))}
				numMessage++
				producer.Input() <- message
				time.Sleep(sleepTime)
			}
		}()

		go func() {
			for {
				select {
				case err := <-producer.Errors():
					saramaError <- err
				case success := <-producer.Successes():
					saramaSuccess <- success
				}
			}
		}()
	}

	ctrlc := make(chan os.Signal, 1)
	signal.Notify(ctrlc, os.Interrupt)
	go func() {
		start := time.Now()
		count := 0
		for {
			select {
			case err := <-saramaError:
				fmt.Println(err)
			case <-saramaSuccess:
				produceRate.Mark(1)
				count++
				elapsed := time.Since(start)
				if elapsed.Seconds() >= 1 {
					fmt.Printf("Per Second %d\n", count)
					count = 0
					start = time.Now()
				}
			}
		}
	}()
	<-ctrlc
}
Example #18
func (k *KafkaOutput) Init(config interface{}) (err error) {
	k.config = config.(*KafkaOutputConfig)
	if len(k.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}

	k.cconfig = sarama.NewClientConfig()
	k.cconfig.MetadataRetries = k.config.MetadataRetries
	k.cconfig.WaitForElection = time.Duration(k.config.WaitForElection) * time.Millisecond
	k.cconfig.BackgroundRefreshFrequency = time.Duration(k.config.BackgroundRefreshFrequency) * time.Millisecond

	k.cconfig.DefaultBrokerConf = sarama.NewBrokerConfig()
	k.cconfig.DefaultBrokerConf.MaxOpenRequests = k.config.MaxOpenRequests
	k.cconfig.DefaultBrokerConf.DialTimeout = time.Duration(k.config.DialTimeout) * time.Millisecond
	k.cconfig.DefaultBrokerConf.ReadTimeout = time.Duration(k.config.ReadTimeout) * time.Millisecond
	k.cconfig.DefaultBrokerConf.WriteTimeout = time.Duration(k.config.WriteTimeout) * time.Millisecond

	k.pconfig = sarama.NewProducerConfig()

	switch k.config.Partitioner {
	case "Random":
		k.pconfig.Partitioner = sarama.NewRandomPartitioner()
		if len(k.config.HashVariable) > 0 {
			return fmt.Errorf("hash_variable should not be set for the %s partitioner", k.config.Partitioner)
		}
	case "RoundRobin":
		k.pconfig.Partitioner = new(sarama.RoundRobinPartitioner)
		if len(k.config.HashVariable) > 0 {
			return fmt.Errorf("hash_variable should not be set for the %s partitioner", k.config.Partitioner)
		}
	case "Hash":
		k.pconfig.Partitioner = sarama.NewHashPartitioner()
		if k.hashVariable = verifyMessageVariable(k.config.HashVariable); k.hashVariable == nil {
			return fmt.Errorf("invalid hash_variable: %s", k.config.HashVariable)
		}
	default:
		return fmt.Errorf("invalid partitioner: %s", k.config.Partitioner)
	}

	if len(k.config.Topic) == 0 {
		if k.topicVariable = verifyMessageVariable(k.config.TopicVariable); k.topicVariable == nil {
			return fmt.Errorf("invalid topic_variable: %s", k.config.TopicVariable)
		}
	} else if len(k.config.TopicVariable) > 0 {
		return errors.New("topic and topic_variable cannot both be set")
	}

	switch k.config.RequiredAcks {
	case "NoResponse":
		k.pconfig.RequiredAcks = sarama.NoResponse
	case "WaitForLocal":
		k.pconfig.RequiredAcks = sarama.WaitForLocal
	case "WaitForAll":
		k.pconfig.RequiredAcks = sarama.WaitForAll
	default:
		return fmt.Errorf("invalid required_acks: %s", k.config.RequiredAcks)
	}

	k.pconfig.Timeout = time.Duration(k.config.Timeout) * time.Millisecond

	switch k.config.CompressionCodec {
	case "None":
		k.pconfig.Compression = sarama.CompressionNone
	case "GZIP":
		k.pconfig.Compression = sarama.CompressionGZIP
	case "Snappy":
		k.pconfig.Compression = sarama.CompressionSnappy
	default:
		return fmt.Errorf("invalid compression_codec: %s", k.config.CompressionCodec)
	}

	k.pconfig.MaxBufferedBytes = k.config.MaxBufferedBytes
	k.pconfig.MaxBufferTime = time.Duration(k.config.MaxBufferTime) * time.Millisecond
	k.pconfig.BackPressureThresholdBytes = k.config.BackPressureThresholdBytes

	k.client, err = sarama.NewClient(k.config.Id, k.config.Addrs, k.cconfig)
	if err != nil {
		return
	}
	k.producer, err = sarama.NewProducer(k.client, k.pconfig)
	return
}
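Init reads a wide configuration surface. A sketch of the KafkaOutputConfig fields it touches, reconstructed from the accesses above; the types are inferred (durations arrive as milliseconds), not taken from the original definition:

// Sketch of KafkaOutputConfig, reconstructed from the field accesses in
// Init. Types are inferred and may differ from the original.
type KafkaOutputConfig struct {
	Id                         string
	Addrs                      []string
	MetadataRetries            int
	WaitForElection            uint32 // milliseconds
	BackgroundRefreshFrequency uint32 // milliseconds
	MaxOpenRequests            int
	DialTimeout                uint32 // milliseconds
	ReadTimeout                uint32 // milliseconds
	WriteTimeout               uint32 // milliseconds
	Partitioner                string // "Random", "RoundRobin", or "Hash"
	HashVariable               string
	Topic                      string
	TopicVariable              string
	RequiredAcks               string // "NoResponse", "WaitForLocal", "WaitForAll"
	Timeout                    uint32 // milliseconds
	CompressionCodec           string // "None", "GZIP", "Snappy"
	MaxBufferedBytes           uint32
	MaxBufferTime              uint32 // milliseconds
	BackPressureThresholdBytes uint32
}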
Example #19
func (k *KafkaInput) Init(config interface{}) (err error) {
	k.config = config.(*KafkaInputConfig)
	if len(k.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}
	if len(k.config.Group) == 0 {
		k.config.Group = k.config.Id
	}

	k.clientConfig = sarama.NewClientConfig()
	k.clientConfig.MetadataRetries = k.config.MetadataRetries
	k.clientConfig.WaitForElection = time.Duration(k.config.WaitForElection) * time.Millisecond
	k.clientConfig.BackgroundRefreshFrequency = time.Duration(k.config.BackgroundRefreshFrequency) * time.Millisecond

	k.clientConfig.DefaultBrokerConf = sarama.NewBrokerConfig()
	k.clientConfig.DefaultBrokerConf.MaxOpenRequests = k.config.MaxOpenRequests
	k.clientConfig.DefaultBrokerConf.DialTimeout = time.Duration(k.config.DialTimeout) * time.Millisecond
	k.clientConfig.DefaultBrokerConf.ReadTimeout = time.Duration(k.config.ReadTimeout) * time.Millisecond
	k.clientConfig.DefaultBrokerConf.WriteTimeout = time.Duration(k.config.WriteTimeout) * time.Millisecond

	k.consumerConfig = sarama.NewConsumerConfig()
	k.consumerConfig.DefaultFetchSize = k.config.DefaultFetchSize
	k.consumerConfig.MinFetchSize = k.config.MinFetchSize
	k.consumerConfig.MaxMessageSize = k.config.MaxMessageSize
	k.consumerConfig.MaxWaitTime = time.Duration(k.config.MaxWaitTime) * time.Millisecond
	k.checkpointFilename = k.pConfig.Globals.PrependBaseDir(filepath.Join("kafka",
		fmt.Sprintf("%s.%s.%d.offset.bin", k.name, k.config.Topic, k.config.Partition)))

	switch k.config.OffsetMethod {
	case "Manual":
		k.consumerConfig.OffsetMethod = sarama.OffsetMethodManual
		if fileExists(k.checkpointFilename) {
			if k.consumerConfig.OffsetValue, err = readCheckpoint(k.checkpointFilename); err != nil {
				return fmt.Errorf("readCheckpoint %s", err)
			}
		} else {
			if err = os.MkdirAll(filepath.Dir(k.checkpointFilename), 0766); err != nil {
				return
			}
			k.consumerConfig.OffsetMethod = sarama.OffsetMethodOldest
		}
	case "Newest":
		k.consumerConfig.OffsetMethod = sarama.OffsetMethodNewest
		if fileExists(k.checkpointFilename) {
			if err = os.Remove(k.checkpointFilename); err != nil {
				return
			}
		}
	case "Oldest":
		k.consumerConfig.OffsetMethod = sarama.OffsetMethodOldest
		if fileExists(k.checkpointFilename) {
			if err = os.Remove(k.checkpointFilename); err != nil {
				return
			}
		}
	default:
		return fmt.Errorf("invalid offset_method: %s", k.config.OffsetMethod)
	}

	k.consumerConfig.EventBufferSize = k.config.EventBufferSize

	k.client, err = sarama.NewClient(k.config.Id, k.config.Addrs, k.clientConfig)
	if err != nil {
		return
	}
	k.consumer, err = sarama.NewConsumer(k.client, k.config.Topic, k.config.Partition, k.config.Group, k.consumerConfig)
	return
}
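The checkpoint handling above leans on two small helpers. Plausible sketches, with signatures inferred from the call sites; the binary encoding in readCheckpoint is an assumption based on the .offset.bin filename:

// fileExists reports whether path exists (signature inferred from use).
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

// readCheckpoint loads the saved offset for OffsetMethodManual.
// The little-endian int64 encoding is assumed, not confirmed.
func readCheckpoint(filename string) (int64, error) {
	f, err := os.Open(filename)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	var offset int64
	err = binary.Read(f, binary.LittleEndian, &offset)
	return offset, err
}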