Example #1
// tailPartitions concurrently consumes every listed partition of topic,
// starting each consumer at the caller-supplied offset.
func tailPartitions(client *sarama.Client, topic string, partitions []int32, offset int64) {
	var wg sync.WaitGroup
	wg.Add(len(partitions))

	tailConsumer := func(partition int32) {
		defer wg.Done()

		// Start this partition's consumer at the caller-supplied offset.
		consumerConfig := sarama.NewConsumerConfig()
		consumerConfig.OffsetMethod = sarama.OffsetMethodManual
		consumerConfig.OffsetValue = offset

		consumer, err := sarama.NewConsumer(client, topic, partition, "", consumerConfig)
		if err != nil {
			// Log and skip this partition only; the other tails keep running.
			logger.Printf("err creating consumer: %s", err)
			return
		}
		defer consumer.Close()

		for event := range consumer.Events() {
			logger.Printf("partition=%d offset=%d key=%s value=%s", event.Partition, event.Offset, event.Key, event.Value)
		}
	}

	for _, partition := range partitions {
		go tailConsumer(partition)
	}

	wg.Wait()
}
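A minimal calling sketch, assuming the legacy sarama v0.x API used above (sarama.NewClient takes a client id, and Client.Partitions returns the partition IDs of a topic); the topic name and starting offset are placeholders:

func main() {
	client, err := sarama.NewClient("tail_client", []string{"localhost:9092"}, nil)
	if err != nil {
		logger.Fatalf("err creating client: %s", err)
	}
	defer client.Close()

	// Partitions returns the IDs of every partition of the topic.
	partitions, err := client.Partitions("my_topic")
	if err != nil {
		logger.Fatalf("err fetching partitions: %s", err)
	}

	// Tail all partitions from the beginning (offset 0).
	tailPartitions(client, "my_topic", partitions, 0)
}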
Example #2
// NewKafka wires up a publisher and a single-partition consumer for the
// benchmark. Errors are ignored here for brevity; real code should check
// the results of NewClient, NewProducer, and NewConsumer.
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	pubClient, _ := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	subClient, _ := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())

	topic := "test"
	pub, _ := sarama.NewProducer(pubClient, sarama.NewProducerConfig())
	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, _ := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}

	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
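The Kafka struct being filled in is not shown here; a plausible shape, inferred from the fields in the composite literal above (a sketch, not the benchmark package's actual definition):

// Hypothetical struct definition; field types are inferred from the
// constructor above.
type Kafka struct {
	handler   benchmark.MessageHandler
	pubClient *sarama.Client
	subClient *sarama.Client
	pub       *sarama.Producer
	sub       *sarama.Consumer
	topic     string
}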
Example #3
func main() {
	client, err := kafka.NewClient("my_client", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> connected")
	}
	defer client.Close()

	consumer, err := kafka.NewConsumer(client, "my_topic", 0, "my_consumer_group", kafka.NewConsumerConfig())
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> consumer ready")
	}
	defer consumer.Close()

	msgCount := 0
	// Count events until the topic has been quiet for five seconds.
consumerLoop:
	for {
		select {
		case event := <-consumer.Events():
			if event.Err != nil {
				panic(event.Err)
			}
			msgCount++
		case <-time.After(5 * time.Second):
			fmt.Println("> timed out")
			break consumerLoop
		}
	}
	fmt.Println("Got", msgCount, "messages.")
}
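For the counter to move, something must publish to my_topic while the loop runs; a companion producer sketch, assuming the same legacy sarama v0.x API (NewProducer takes a client, and SendMessage takes a topic plus key/value Encoders):

producer, err := kafka.NewProducer(client, kafka.NewProducerConfig())
if err != nil {
	panic(err)
}
defer producer.Close()

for i := 0; i < 10; i++ {
	// nil key: the default partitioner chooses the partition.
	if err := producer.SendMessage("my_topic", nil, kafka.StringEncoder("hello")); err != nil {
		panic(err)
	}
}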
Example #4
func main() {
	err := envconfig.Process("KC", &appConfig)
	if err != nil {
		log.Fatal(err.Error())
	}
	fmt.Println(appConfig)
	client, err := sarama.NewClient("my_client", []string{appConfig.Broker}, nil)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> connected")
	}
	defer client.Close()

	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest
	consumer, err := sarama.NewConsumer(client, appConfig.Topic,
		appConfig.Partition, "my_consumer_group", consumerConfig)
	if err != nil {
		panic(err)
	} else {
		fmt.Println("> consumer ready")
	}
	defer consumer.Close()

	// A select with a single case is just a channel receive; ranging over
	// the events channel is the idiomatic form.
	for event := range consumer.Events() {
		if event.Err != nil {
			panic(event.Err)
		}
		fmt.Println("got message:", string(event.Value))
	}
}
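The appConfig value is not defined in this snippet; with kelseyhightower/envconfig and the "KC" prefix used above, a hypothetical definition would be populated from KC_BROKER, KC_TOPIC, and KC_PARTITION:

// Hypothetical config struct; the field names are inferred from the
// appConfig usages above.
var appConfig struct {
	Broker    string
	Topic     string
	Partition int32
}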
Example #5
// Consume tails one partition of topic, resuming from the offset stored in
// the cursor file, and calls updater for each decoded event; when updater
// returns true the cursor is advanced past that event.
func Consume(quit chan bool, server []string, topic, cursor string, partition int32, updater func(key, value, typ string, offset int64) bool) {
	logPrintln("Start Consume")
	cursorFile, errc := os.OpenFile(cursor, os.O_CREATE|os.O_RDWR, 0666)
	if errc != nil {
		panic(errc)
	}
	defer cursorFile.Close()

	client, err := sarama.NewClient("crontab_client", server, nil)
	if err != nil {
		panic(err)
	} else {
		logPrintln("kafka consumer connected")
	}
	defer client.Close()
	cfg := sarama.NewConsumerConfig()
	cfg.OffsetMethod = sarama.OffsetMethodManual
	cfg.OffsetValue = readCursor(cursorFile)
	consumer, err := sarama.NewConsumer(client, topic, partition, "crontab_group", cfg)
	if err != nil {
		panic(err)
	} else {
		logPrintln("kafka consumer ready")
	}
	defer consumer.Close()

consumerLoop:
	for {
		select {
		case <-quit:
			logPrintln("kafka consumer quit")
			break consumerLoop
		case event := <-consumer.Events():
			if event.Err != nil {
				logPrintln(event.Err)
				continue
			}
			var out map[string]interface{}
			err = codec.NewDecoderBytes(event.Value, &codec.MsgpackHandle{}).Decode(&out)
			if err != nil {
				logPrintln(err)
				continue
			}
			userSceneId := "0"
			switch v := out["userSceneId"].(type) {
			case uint64:
				userSceneId = strconv.FormatUint(v, 10)
			case string:
				userSceneId = v
			case []byte:
				userSceneId = string(v)
			}
			year := time.Now().Year()
			timer := string(out["timer"].([]byte))
			types := string(out["type"].([]byte))
			arr := strings.Split(timer, " ")
			// Expand the 5-field crontab spec to 7 fields: prepend a random
			// seconds value and append a year (or "*").
			if len(arr) == 5 && types == "add" {
				if strings.Contains(arr[3], "*") {
					timer = strconv.Itoa(rand.Intn(10)) + " " + timer + " *"
				} else {
					timer = strconv.Itoa(rand.Intn(10)) + " " + timer + " " + strconv.Itoa(year)
				}
			}
			// Only advance the persisted cursor if the event was handled.
			if updater(userSceneId+" "+string(out["key"].([]byte)), timer, types, event.Offset) {
				updateCursor(event.Offset+1, cursorFile)
			}
		}
	}
}
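readCursor and updateCursor are not shown; hypothetical implementations, assuming the cursor file simply stores the next offset to consume as a decimal string:

// readCursor returns the offset stored in the cursor file, or 0 if the
// file is empty or unreadable. (Hypothetical helper, not from the source.)
func readCursor(f *os.File) int64 {
	buf := make([]byte, 32)
	n, _ := f.ReadAt(buf, 0)
	offset, err := strconv.ParseInt(strings.TrimSpace(string(buf[:n])), 10, 64)
	if err != nil {
		return 0
	}
	return offset
}

// updateCursor overwrites the cursor file with the next offset to consume.
// (Hypothetical helper, not from the source.)
func updateCursor(offset int64, f *os.File) {
	f.Truncate(0)
	f.WriteAt([]byte(strconv.FormatInt(offset, 10)), 0)
	f.Sync()
}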
Example #6
// Init configures the Kafka client, broker, and consumer settings from the
// plugin config, and restores or resets the offset checkpoint according to
// the configured offset_method.
func (k *KafkaInput) Init(config interface{}) (err error) {
	k.config = config.(*KafkaInputConfig)
	if len(k.config.Addrs) == 0 {
		return errors.New("addrs must have at least one entry")
	}
	if len(k.config.Group) == 0 {
		k.config.Group = k.config.Id
	}

	k.clientConfig = sarama.NewClientConfig()
	k.clientConfig.MetadataRetries = k.config.MetadataRetries
	k.clientConfig.WaitForElection = time.Duration(k.config.WaitForElection) * time.Millisecond
	k.clientConfig.BackgroundRefreshFrequency = time.Duration(k.config.BackgroundRefreshFrequency) * time.Millisecond

	k.clientConfig.DefaultBrokerConf = sarama.NewBrokerConfig()
	k.clientConfig.DefaultBrokerConf.MaxOpenRequests = k.config.MaxOpenRequests
	k.clientConfig.DefaultBrokerConf.DialTimeout = time.Duration(k.config.DialTimeout) * time.Millisecond
	k.clientConfig.DefaultBrokerConf.ReadTimeout = time.Duration(k.config.ReadTimeout) * time.Millisecond
	k.clientConfig.DefaultBrokerConf.WriteTimeout = time.Duration(k.config.WriteTimeout) * time.Millisecond

	k.consumerConfig = sarama.NewConsumerConfig()
	k.consumerConfig.DefaultFetchSize = k.config.DefaultFetchSize
	k.consumerConfig.MinFetchSize = k.config.MinFetchSize
	k.consumerConfig.MaxMessageSize = k.config.MaxMessageSize
	k.consumerConfig.MaxWaitTime = time.Duration(k.config.MaxWaitTime) * time.Millisecond
	k.checkpointFilename = k.pConfig.Globals.PrependBaseDir(filepath.Join("kafka",
		fmt.Sprintf("%s.%s.%d.offset.bin", k.name, k.config.Topic, k.config.Partition)))

	switch k.config.OffsetMethod {
	case "Manual":
		k.consumerConfig.OffsetMethod = sarama.OffsetMethodManual
		if fileExists(k.checkpointFilename) {
			if k.consumerConfig.OffsetValue, err = readCheckpoint(k.checkpointFilename); err != nil {
				return fmt.Errorf("readCheckpoint %s", err)
			}
		} else {
			// No checkpoint yet: create its directory and fall back to the
			// oldest offset until the first checkpoint is written.
			if err = os.MkdirAll(filepath.Dir(k.checkpointFilename), 0766); err != nil {
				return
			}
			k.consumerConfig.OffsetMethod = sarama.OffsetMethodOldest
		}
	case "Newest":
		k.consumerConfig.OffsetMethod = sarama.OffsetMethodNewest
		if fileExists(k.checkpointFilename) {
			if err = os.Remove(k.checkpointFilename); err != nil {
				return
			}
		}
	case "Oldest":
		k.consumerConfig.OffsetMethod = sarama.OffsetMethodOldest
		if fileExists(k.checkpointFilename) {
			if err = os.Remove(k.checkpointFilename); err != nil {
				return
			}
		}
	default:
		return fmt.Errorf("invalid offset_method: %s", k.config.OffsetMethod)
	}

	k.consumerConfig.EventBufferSize = k.config.EventBufferSize

	k.client, err = sarama.NewClient(k.config.Id, k.config.Addrs, k.clientConfig)
	if err != nil {
		return
	}
	k.consumer, err = sarama.NewConsumer(k.client, k.config.Topic, k.config.Partition, k.config.Group, k.consumerConfig)
	return
}
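fileExists and readCheckpoint are helpers defined elsewhere in the plugin; hypothetical versions, assuming the checkpoint file holds a single binary-encoded int64 offset:

// fileExists reports whether path exists. (Hypothetical helper.)
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

// readCheckpoint reads the stored offset from the checkpoint file.
// (Hypothetical helper; the on-disk encoding is an assumption.)
func readCheckpoint(filename string) (offset int64, err error) {
	f, err := os.Open(filename)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	err = binary.Read(f, binary.BigEndian, &offset)
	return offset, err
}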