Example #1
func main() {
	brokers := flag.String("brokers", "localhost:9093", "Comma-separated list of Kafka brokers")
	topic := flag.String("topic", "my-topic", "Kafka topic to consume messages from")
	flag.Parse()

	logger := log.New(os.Stdout, "consumer ", log.Lmicroseconds)

	consumer, err := sarama.NewConsumer(strings.Split(*brokers, ","), nil)
	if err != nil {
		logger.Panicln(err)
	}

	defer func() {
		if err := consumer.Close(); err != nil {
			logger.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition(*topic, 0, sarama.OffsetNewest)
	if err != nil {
		logger.Panicln(err)
	}

	logger.Println("Start")
	i := 0
	for msg := range partitionConsumer.Messages() {
		if string(msg.Value) == "THE END" {
			break
		}
		i++
	}
	logger.Printf("Finished. Received %d messages.\n", i)

}
Example #2
func main() {
	flag.Parse()

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true
	consumer, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		log.Fatalln(err)
	}
	defer func() {
		if err := consumer.Close(); err != nil {
			panic(err)
		}
	}()

	var pf ProcessFunc
	switch op {
	case "+":
		pf = processAdd
	case "-":
		pf = processSub
	case "*":
		pf = processMul
	case "/":
		pf = processDiv
	}

	// Set up one partition consumer for each partition
	partitions, err := consumer.Partitions(topic)
	if err != nil {
		log.Fatalln(err)
	}

	partitionConsumers := make([]sarama.PartitionConsumer, len(partitions))
	for idx, partition := range partitions {
		pc, err := consumer.ConsumePartition(topic, partition, sarama.OffsetNewest)
		if err != nil {
			log.Fatalln(err)
		}

		partitionConsumers[idx] = pc
		go func(pc sarama.PartitionConsumer) {
			Serve(pc.Messages(), pf)
		}(pc)

		go func(pc sarama.PartitionConsumer) {
			for err := range pc.Errors() {
				log.Println(err)
			}
		}(pc)
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)
	<-signals
	for _, pc := range partitionConsumers {
		fmt.Println("Closing partition, next offset", pc.HighWaterMarkOffset())
		pc.AsyncClose()
	}
}
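ProcessFunc, Serve, op, brokers, and topic are defined elsewhere in this program, not in sarama. A minimal sketch of the shapes this example assumes (names and signatures are inferred from usage):

// Hypothetical declarations matching the usage in Example #2.
type ProcessFunc func(msg *sarama.ConsumerMessage)

// Serve applies pf to every message arriving on the channel.
func Serve(messages <-chan *sarama.ConsumerMessage, pf ProcessFunc) {
	for msg := range messages {
		pf(msg)
	}
}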
Example #3
func main() {
	config := sarama.NewConfig()
	// Handle errors manually
	config.Consumer.Return.Errors = true

	consumer, err := sarama.NewConsumer([]string{kafkaAddr}, config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()

	logConsumer, err := consumer.ConsumePartition("buy", 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}
	defer logConsumer.Close()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	for {
		select {
		case err := <-logConsumer.Errors():
			log.Println(err)
		case msg := <-logConsumer.Messages():
			order := &Order{}
			if err := json.Unmarshal(msg.Value, order); err != nil {
				log.Println("failed to unmarshal order:", err)
				continue
			}
			log.Printf("notification to %s with order %s", order.UserID, order.OrderID)
		case <-signals:
			return
		}
	}
}
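The Order type is not shown in this excerpt; a minimal sketch of the payload the json.Unmarshal call above assumes (field names and JSON tags are guesses):

// Hypothetical order payload for Example #3.
type Order struct {
	UserID  string `json:"user_id"`
	OrderID string `json:"order_id"`
}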
Example #4
func newConsumer() (masterConsumer kafka.Consumer, consumers []kafka.PartitionConsumer) {
	config := kafka.NewConfig()
	config.Net.KeepAlive = 30 * time.Second
	config.Consumer.Retry.Backoff = 25 * time.Millisecond

	consumers = make([]kafka.PartitionConsumer, 0)

	retry(func() (err error) {
		var consumer kafka.PartitionConsumer
		var partitions []int32

		// Reset on each attempt so a failed attempt doesn't leave stale consumers behind.
		consumers = consumers[:0]

		masterConsumer, err = kafka.NewConsumer(kafkas, config)
		if err != nil {
			return
		}

		partitions, err = masterConsumer.Partitions(topic)
		if err != nil {
			return
		}

		for _, partition := range partitions {
			consumer, err = masterConsumer.ConsumePartition(topic, partition, kafka.OffsetNewest)
			if err != nil {
				return
			}

			consumers = append(consumers, consumer)
		}
		return
	})

	return
}
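retry is not a sarama function; it is a helper defined elsewhere in this codebase. A plausible minimal version, assuming it simply loops until the callback succeeds (the retry delay is an assumption):

// Hypothetical retry helper for Example #4: call fn until it returns nil,
// pausing briefly between attempts.
func retry(fn func() error) {
	for {
		if err := fn(); err == nil {
			return
		}
		time.Sleep(250 * time.Millisecond)
	}
}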
Example #5
func main() {
	client, err := sarama.NewClient("a_logger_for_mhub", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	} else {
		os.Stderr.WriteString("> connected\n")
	}
	defer client.Close()

	consumer, err := sarama.NewConsumer(client, "received", 0, "", nil)
	if err != nil {
		panic(err)
	} else {
		os.Stderr.WriteString("> consumer ready\n")
	}
	defer consumer.Close()

	for event := range consumer.Events() {
		if event.Err != nil {
			panic(event.Err)
		}
		// utf8.FullRune reports whether the value begins with a complete UTF-8 rune.
		fmt.Println(utf8.FullRune(event.Value))
	}
}
Example #6
// NewPartitionConsumer creates a new partition consumer instance
func NewPartitionConsumer(group *ConsumerGroup, partition int32) (*PartitionConsumer, error) {
	config := sarama.ConsumerConfig{
		DefaultFetchSize: group.config.DefaultFetchSize,
		EventBufferSize:  group.config.EventBufferSize,
		MaxMessageSize:   group.config.MaxMessageSize,
		MaxWaitTime:      group.config.MaxWaitTime,
		MinFetchSize:     group.config.MinFetchSize,
		OffsetMethod:     sarama.OffsetMethodOldest,
	}

	offset, err := group.Offset(partition)
	if err != nil {
		return nil, err
	} else if offset > 0 {
		config.OffsetMethod = sarama.OffsetMethodManual
		config.OffsetValue = offset
	}

	stream, err := sarama.NewConsumer(group.client, group.topic, partition, group.name, &config)
	if err != nil {
		return nil, err
	}

	return &PartitionConsumer{
		stream:    stream,
		topic:     group.topic,
		partition: partition,
	}, nil
}
Example #7
func tailPartitions(client *sarama.Client, topic string, partitions []int32) {
	var wg sync.WaitGroup
	wg.Add(len(partitions))

	tailConsumer := func(partition int32) {
		defer wg.Done()

		consumerConfig := sarama.NewConsumerConfig()
		consumerConfig.OffsetMethod = sarama.OffsetMethodManual
		consumerConfig.OffsetValue = offset

		consumer, err := sarama.NewConsumer(client, topic, partition, "", consumerConfig)
		if err != nil {
			logger.Fatalf("err creating consumer: %s", err)
		}
		defer consumer.Close()

		for event := range consumer.Events() {
			logger.Printf("partition=%d offset=%d key=%s value=%s", event.Partition, event.Offset, event.Key, event.Value)
		}
	}

	for _, partition := range partitions {
		go tailConsumer(partition)
	}

	wg.Wait()
}
Example #8
// NewKafkaSubscriber will initiate the experimental Kafka consumer.
func NewKafkaSubscriber(cfg *config.Kafka, offsetProvider func() int64, offsetBroadcast func(int64)) (*KafkaSubscriber, error) {
	var err error
	s := &KafkaSubscriber{
		offset:          offsetProvider,
		broadcastOffset: offsetBroadcast,
		partition:       cfg.Partition,
		stop:            make(chan chan error, 1),
	}

	if len(cfg.BrokerHosts) == 0 {
		return s, errors.New("at least 1 broker host is required")
	}

	if len(cfg.Topic) == 0 {
		return s, errors.New("topic name is required")
	}
	s.topic = cfg.Topic

	sconfig := sarama.NewConfig()
	sconfig.Consumer.Return.Errors = true
	s.cnsmr, err = sarama.NewConsumer(cfg.BrokerHosts, sconfig)
	return s, err
}
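The KafkaSubscriber struct itself is not shown; a sketch of the fields this constructor implies (types are inferred from the assignments above):

// Hypothetical shape of KafkaSubscriber implied by NewKafkaSubscriber.
type KafkaSubscriber struct {
	cnsmr           sarama.Consumer
	topic           string
	partition       int32
	offset          func() int64
	broadcastOffset func(int64)
	stop            chan chan error
}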
Example #9
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	pubClient, _ := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	subClient, _ := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())

	topic := "test"
	pub, _ := sarama.NewProducer(pubClient, sarama.NewProducerConfig())
	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, _ := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
Example #10
// NewConsumer returns a new Consumer
func NewConsumer(conf ConsumerConfig) (Consumer, error) {
	c := new(consumer)

	config := sarama.NewConfig()
	config.Net.DialTimeout = time.Second * 60
	if conf.SASLEnabled {
		config.Net.TLS.Enable = true
		config.Net.SASL.User = conf.Username
		config.Net.SASL.Password = conf.Password
		config.Net.SASL.Enable = conf.SASLEnabled
		config.ClientID = conf.ClientID
	}

	var err error
	c.consumer, err = sarama.NewConsumer(conf.Brokers, config)
	if err != nil {
		return nil, err
	}

	c.partConsumer, err = c.consumer.ConsumePartition(conf.Topic, 0, sarama.OffsetNewest)
	if err != nil {
		return nil, err
	}

	return c, nil
}
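ConsumerConfig and the unexported consumer type are defined elsewhere; a sketch of what this constructor implies (field names and types are inferred from usage, not authoritative):

// Hypothetical types implied by Example #10's NewConsumer.
type ConsumerConfig struct {
	Brokers     []string
	Topic       string
	ClientID    string
	Username    string
	Password    string
	SASLEnabled bool
}

type consumer struct {
	consumer     sarama.Consumer
	partConsumer sarama.PartitionConsumer
}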
Example #11
func main() {
	client, err := kafka.NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> connected")
	}
	defer client.Close()

	consumer, err := kafka.NewConsumer(client, "my_topic", 0, "my_consumer_group", kafka.NewConsumerConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> consumer ready")
	}
	defer consumer.Close()

	msgCount := 0
consumerLoop:
	for {
		select {
		case event := <-consumer.Events():
			if event.Err != nil {
				panic(event.Err)
			}
			msgCount++
		case <-time.After(5 * time.Second):
			fmt.Println("> timed out")
			break consumerLoop
		}
	}
	fmt.Println("Got", msgCount, "messages.")
}
Example #12
// Setup prepares the Requester for benchmarking.
func (k *kafkaRequester) Setup() error {
	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer(k.urls, config)
	if err != nil {
		return err
	}

	consumer, err := sarama.NewConsumer(k.urls, nil)
	if err != nil {
		producer.Close()
		return err
	}
	partitionConsumer, err := consumer.ConsumePartition(k.topic, 0, sarama.OffsetNewest)
	if err != nil {
		producer.Close()
		consumer.Close()
		return err
	}

	k.producer = producer
	k.consumer = consumer
	k.partitionConsumer = partitionConsumer
	k.msg = &sarama.ProducerMessage{
		Topic: k.topic,
		Value: sarama.ByteEncoder(make([]byte, k.payloadSize)),
	}

	return nil
}
Example #13
func newTestConsumer(t *testing.T) sarama.Consumer {
	hosts := []string{getTestKafkaHost()}
	consumer, err := sarama.NewConsumer(hosts, nil)
	if err != nil {
		t.Fatal(err)
	}
	return consumer
}
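getTestKafkaHost is a test helper defined elsewhere; a plausible version that reads the broker address from the environment (the variable name and default are assumptions):

// Hypothetical helper for Example #13.
func getTestKafkaHost() string {
	if host := os.Getenv("KAFKA_HOST"); host != "" {
		return host
	}
	return "localhost:9092"
}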
Example #14
// newKafkaConsumer creates a Kafka consumer using github.com/Shopify/sarama.
func newKafkaConsumer(broker string) (kafka.Consumer, error) {
	config := kafka.NewConfig()
	consumer, err := kafka.NewConsumer([]string{broker}, config)
	if err != nil {
		return nil, err
	}
	return consumer, nil
}
Example #15
func (c *KafkaClient) NewConsumer(hostports []string) error {
	consumer, err := sarama.NewConsumer(hostports, nil)
	if err != nil {
		log.Printf("[kafka] new a consumer %+v error, %s\n", hostports, err)
	} else {
		log.Printf("[kafka] new a consumer %+v success.\n", hostports)
	}
	c.Consumer = consumer
	return err
}
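The KafkaClient receiver is not shown; given the assignment to c.Consumer above, it presumably looks something like:

// Hypothetical receiver type for Examples #15 and #17.
type KafkaClient struct {
	Consumer sarama.Consumer
}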
Example #16
func (suite *KafkaTester) Test01() {
	t := suite.T()
	assert := assert.New(t)

	const M1 = "message one"
	const M2 = "message two"

	var producer sarama.AsyncProducer
	var consumer sarama.Consumer
	var partitionConsumer sarama.PartitionConsumer

	var err error

	topic := makeTopicName()

	{
		config := sarama.NewConfig()
		config.Producer.Return.Successes = false
		config.Producer.Return.Errors = false

		producer, err = sarama.NewAsyncProducer([]string{suite.server}, config)
		assert.NoError(err)
		defer close(t, producer)

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M1)}

		producer.Input() <- &sarama.ProducerMessage{
			Topic: topic,
			Key:   nil,
			Value: sarama.StringEncoder(M2)}
	}

	{
		consumer, err = sarama.NewConsumer([]string{suite.server}, nil)
		assert.NoError(err)
		defer close(t, consumer)

		partitionConsumer, err = consumer.ConsumePartition(topic, 0, 0)
		assert.NoError(err)
		defer close(t, partitionConsumer)
	}

	{
		mssg1 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d  value:%v", mssg1.Offset, string(mssg1.Value))
		mssg2 := <-partitionConsumer.Messages()
		//t.Logf("Consumed: offset:%d  value:%v", mssg2.Offset, string(mssg2.Value))

		assert.EqualValues(M1, string(mssg1.Value))
		assert.EqualValues(M2, string(mssg2.Value))
	}
}
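The close helper used with defer above is not shown; a plausible version (assumed; note it shadows the builtin close within its package):

// Hypothetical test helper for Example #16: close c and fail the test on error.
func close(t *testing.T, c io.Closer) {
	if err := c.Close(); err != nil {
		t.Fatal(err)
	}
}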
Example #17
func (c *KafkaClient) NewConsumer(conf *Configure) error {
	hostports := conf.Kafka.Hosts
	consumer, err := sarama.NewConsumer(hostports, nil)
	if err != nil {
		log.Printf("[kafka] new a consumer %+v error, %s\n", hostports, err)
	} else {
		log.Printf("[kafka] new a consumer %+v success.\n", hostports)
	}
	c.Consumer = consumer
	return err
}
Example #18
func GetConsumer() sarama.Consumer {

	config := sarama.NewConfig()

	consumer, err := sarama.NewConsumer(brokerList, config)
	if err != nil {
		log.Fatalln("Failed to start Sarama consumer", err)
	}

	return consumer
}
Example #19
func main() {

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	// Specify broker addresses; localhost:9092 is the default.
	brokers := []string{"localhost:9092"}

	// Create new consumer
	master, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err := master.Close(); err != nil {
			panic(err)
		}
	}()

	topic := "important"
	// The partition is hard-coded to 0 here; see the note after this example
	// for how to discover and consume all partitions instead.
	consumer, err := master.ConsumePartition(topic, 0, sarama.OffsetOldest)
	if err != nil {
		panic(err)
	}

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	// Count how many messages have been processed
	msgCount := 0

	// Channel used to signal when processing is finished
	doneCh := make(chan struct{})
	go func() {
		for {
			select {
			case err := <-consumer.Errors():
				fmt.Println(err)
			case msg := <-consumer.Messages():
				msgCount++
				fmt.Println("Received messages", string(msg.Key), string(msg.Value))
			case <-signals:
				fmt.Println("Interrupt is detected")
				doneCh <- struct{}{}
			}
		}
	}()

	<-doneCh
	fmt.Println("Processed", msgCount, "messages")
}
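On the question in the comment above: the partition need not be a fixed value. A consumer can ask the brokers for the topic's partition list and start one PartitionConsumer per partition, as Examples #2 and #23 do. A minimal sketch using the same master consumer:

// Consume every partition of the topic instead of hard-coding partition 0.
partitions, err := master.Partitions(topic)
if err != nil {
	panic(err)
}
for _, p := range partitions {
	pc, err := master.ConsumePartition(topic, p, sarama.OffsetOldest)
	if err != nil {
		panic(err)
	}
	go func(pc sarama.PartitionConsumer) {
		for msg := range pc.Messages() {
			fmt.Println("Received message", string(msg.Key), string(msg.Value))
		}
	}(pc)
}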
Example #20
func NewReader(kafkaHost string) *Reader {
	r := new(Reader)

	consumer, err := sarama.NewConsumer([]string{kafkaHost}, nil)
	if err != nil {
		panic(err)
	}

	r.consumer = consumer

	return r
}
Example #21
func main() {
	flag.Parse()

	if *verbose {
		sarama.Logger = logger
	}

	var (
		initialOffset int64
		offsetError   error
	)
	switch *offset {
	case "oldest":
		initialOffset = sarama.OffsetOldest
	case "newest":
		initialOffset = sarama.OffsetNewest
	default:
		initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
	}

	if offsetError != nil {
		logger.Fatalln("Invalid initial offset:", *offset)
	}

	c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
	if err != nil {
		logger.Fatalln(err)
	}

	pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
	if err != nil {
		logger.Fatalln(err)
	}

	go func() {
		signals := make(chan os.Signal, 1)
		// Note: os.Kill (SIGKILL) cannot actually be trapped; os.Interrupt is what fires here.
		signal.Notify(signals, os.Kill, os.Interrupt)
		<-signals
		pc.AsyncClose()
	}()

	for msg := range pc.Messages() {
		fmt.Printf("Offset: %d\n", msg.Offset)
		fmt.Printf("Key:    %s\n", string(msg.Key))
		fmt.Printf("Value:  %s\n", string(msg.Value))
		fmt.Println()
	}

	if err := c.Close(); err != nil {
		fmt.Println("Failed to close consumer: ", err)
	}
}
Example #22
func (c *kafka_subscription) connect() error {
	logging.Info("connect")

	sconfig := sarama.NewConfig()
	logging.Debugf("broker list: %v", c.opts.Broker_list)

	master, err := sarama.NewConsumer(c.opts.Broker_list, sconfig)
	if err != nil {
		return fmt.Errorf("Cannot connect to kafka: %v", err)
	}
	c.master = master
	return nil
}
Example #23
func main() {
	parseArgs()

	closer := listenForInterrupt()

	consumer, err := sarama.NewConsumer(config.brokers, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create consumer err=%v\n", err)
		os.Exit(1)
	}

	partitions, err := consumer.Partitions(config.topic)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read partitions for topic %v err=%v\n", config.topic, err)
		os.Exit(1)
	}

	var wg sync.WaitGroup
consuming:
	for _, partition := range partitions {
		partitionConsumer, err := consumer.ConsumePartition(config.topic, partition, config.startOffset)
		if err != nil {
			log.Printf("Failed to consume partition %v err=%v\n", partition, err)
			continue consuming
		}
		wg.Add(1)

		go func(pc sarama.PartitionConsumer) {
			for {
				select {
				case <-closer:
					pc.Close()
					wg.Done()
					return
				case msg, ok := <-pc.Messages():
					if !ok { // message channel closed
						wg.Done()
						return
					}
					print(msg)
					if config.endOffset > 0 && msg.Offset >= config.endOffset {
						pc.Close()
						wg.Done()
						return
					}
				}
			}
		}(partitionConsumer)
	}
	wg.Wait()
	consumer.Close()
}
Example #24
func main() {

	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	// Specify broker addresses; localhost:9092 is the default.
	brokers := []string{"localhost:9092"}

	// Create new consumer
	master, err := sarama.NewConsumer(brokers, config)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err := master.Close(); err != nil {
			panic(err)
		}
	}()

	topic := "important"
	// The partition is hard-coded to 0 here; see the note after Example #19
	// for how to discover and consume all partitions instead.
	consumer, err := master.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}

	go func() {
		for {
			select {
			case err := <-consumer.Errors():
				fmt.Println(err)
			case msg := <-consumer.Messages():

				temperature, err := strconv.ParseFloat(string(msg.Value), 64)
				if err != nil {
					panic(err)
				}

				fmt.Println("Received messages", string(msg.Key), temperature)

				evaluateTemperature(temperature)
			}
		}
	}()

	// Block forever. Note that on interrupt the process exits without running the deferred Close.
	select {}

}
Example #25
func NewSaramaConsumers(servers []string, topic, offsetType string) (*sarama.Consumer, *[]sarama.PartitionConsumer, error) {
	config := sarama.NewConfig()
	config.ClientID = ipresolver.GetLocalAddr()
	config.Consumer.Return.Errors = true
	consumer, err := sarama.NewConsumer(servers, config)
	if err != nil {
		return nil, nil, err
	}
	partitions, err := consumer.Partitions(topic)
	if err != nil {
		return nil, nil, err
	}
	if eatonconfig.IsDebug() {
		log.Println("Returned Partitions for topic: ", topic, partitions)
	}
	if len(partitions) == 0 {
		return nil, nil, errors.New("no partitions returned to consume")
	}
	partitionConsumers := make([]sarama.PartitionConsumer, len(partitions))
	chosenOffset := sarama.OffsetOldest
	switch offsetType {
	case "oldest":
		chosenOffset = sarama.OffsetOldest
	case "newest":
		chosenOffset = sarama.OffsetNewest
	default:
		log.Fatal("unknown offsetType provided: ", offsetType)
	}
	for index, partition := range partitions {
		if eatonconfig.IsDebug() {
			log.Println("Creating partition consumer for partition: ", partition, " with offset: ", chosenOffset)
		}
		partitionConsumer, err := consumer.ConsumePartition(topic, partition, chosenOffset)
		if err != nil {
			return nil, nil, err
		}
		if partitionConsumer == nil {
			return nil, nil, errors.New("nil partition consumer returned")
		}
		if eatonconfig.IsDebug() {
			log.Println("Created partition consumer: ", partitionConsumer)
		}
		partitionConsumers[index] = partitionConsumer
	}
	return &consumer, &partitionConsumers, nil
}
Example #26
func newConsumer(brokers []string, kafkaVersion sarama.KafkaVersion, cp ChainPartition, offset int64) (Consumer, error) {
	parent, err := sarama.NewConsumer(brokers, newBrokerConfig(kafkaVersion, rawPartition))
	if err != nil {
		return nil, err
	}
	partition, err := parent.ConsumePartition(cp.Topic(), cp.Partition(), offset)
	if err != nil {
		return nil, err
	}
	c := &consumerImpl{
		parent:    parent,
		partition: partition,
	}
	logger.Debugf("Created new consumer for session (partition %s, beginning offset %d)", cp, offset)
	return c, nil
}
Example #27
func (cm *CallbackManager) connectKafka() error {
	// kafka consumer initialization
	brokers, err := cm.kazoo.BrokerList()
	if err != nil {
		return err
	}

	// connect kafka using sarama Consumer
	if cm.kafkaConsumer, err = sarama.NewConsumer(brokers, cm.kafkaConfig); err != nil {
		return err
	}

	return nil
}
Example #28
func eat(topic string, partition int32) {
	consumer, err := sarama.NewConsumer([]string{"10.10.93.146:9092"}, nil)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err := consumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	partitionConsumer, err := consumer.ConsumePartition(topic, partition, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err := partitionConsumer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	//redis := openRedis()
	consumed := 0
ConsumerLoop:
	for {
		select {
		case msg := <-partitionConsumer.Messages():
			log.Printf("Consumed message offset %d\n", msg.Offset)
			//redis.SAdd("atime_ap", string(msg.Value))
			//log.Printf("Consumed message offset %d\n", msg.Offset)
			//log.Printf("Consumed message value %s\n", string(msg.Value))
			consumed++
		case <-signals:
			break ConsumerLoop
		}
	}

	log.Printf("Consumed: %d\n", consumed)
}
Example #29
func (k *KafkaClient) initKafkaConsumer(broker string) error {
	config := sarama.NewConfig()
	config.Consumer.Return.Errors = true

	brokerList := []string{broker}
	k.log.Debug(brokerList)
	consumer, err := sarama.NewConsumer(brokerList, config)
	if err != nil {
		k.log.Error("Failed to start consumer:")
		k.log.Error(err)
		return err
	}

	k.log.Debug("Create new consumer done")
	k.consumer = consumer

	return nil
}
Example #30
// NewPeer creates and returns a new Peer for communicating with Kafka.
func NewPeer(host string) (*Peer, error) {
	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}

	producer, err := sarama.NewAsyncProducer([]string{host}, config)
	if err != nil {
		return nil, err
	}

	consumer, err := sarama.NewConsumer([]string{host}, config)
	if err != nil {
		return nil, err
	}

	partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		return nil, err
	}

	return &Peer{
		client:   client,
		producer: producer,
		consumer: partitionConsumer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}