// getPartitions returns every partition of the topic when *partitions is "all";
// otherwise it parses *partitions as a comma-separated list of partition IDs.
func getPartitions(c sarama.Consumer) ([]int32, error) {
	if *partitions == "all" {
		return c.Partitions(*topic)
	}

	tmp := strings.Split(*partitions, ",")
	var pList []int32
	for i := range tmp {
		val, err := strconv.ParseInt(tmp[i], 10, 32)
		if err != nil {
			return nil, err
		}
		pList = append(pList, int32(val))
	}

	return pList, nil
}
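A minimal sketch of how this helper might be driven, assuming *topic and *partitions are package-level flag.String pointers and that getPartitions lives in the same package (in a file importing strings and strconv). The flag names, defaults, and broker address below are illustrative, not taken from the original.

package main

import (
	"flag"
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

// Assumed flag definitions backing the *topic and *partitions pointers used by
// getPartitions above.
var (
	topic      = flag.String("topic", "events", "topic to consume")
	partitions = flag.String("partitions", "all", `comma-separated partition IDs, or "all"`)
)

func main() {
	flag.Parse()

	// Placeholder broker address for a local Kafka instance.
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalf("failed to create consumer: %v", err)
	}
	defer consumer.Close()

	pList, err := getPartitions(consumer)
	if err != nil {
		log.Fatalf("failed to resolve partitions: %v", err)
	}
	fmt.Println("consuming partitions:", pList)
}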
// GetKafkaPartitions is a helper function to look up which partitions are available
// via the given brokers for the given topic. This should be called only on startup.
func GetKafkaPartitions(brokerHosts []string, topic string) (partitions []int32, err error) {
	if len(brokerHosts) == 0 {
		return partitions, errors.New("at least 1 broker host is required")
	}

	if len(topic) == 0 {
		return partitions, errors.New("topic name is required")
	}

	var cnsmr sarama.Consumer
	cnsmr, err = sarama.NewConsumer(brokerHosts, sarama.NewConfig())
	if err != nil {
		return partitions, err
	}
	defer func() {
		if cerr := cnsmr.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()

	return cnsmr.Partitions(topic)
}
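A brief usage sketch, assuming GetKafkaPartitions is available in the calling package; the broker address and topic name are placeholders.

package main

import "log"

func main() {
	// One-time lookup at startup, per the doc comment above.
	parts, err := GetKafkaPartitions([]string{"localhost:9092"}, "events")
	if err != nil {
		log.Fatalf("could not look up partitions: %v", err)
	}
	log.Printf("topic has %d partitions: %v", len(parts), parts)
}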
// findPartitions returns all partitions of the configured topic when a default
// offset (key -1) is present in config.offsets; otherwise it returns only the
// partitions explicitly listed as keys in config.offsets.
func findPartitions(consumer sarama.Consumer, config consumeConfig) []int32 {
	allPartitions, err := consumer.Partitions(config.topic)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read partitions for topic %v err=%v\n", config.topic, err)
		os.Exit(1)
	}

	_, hasDefaultOffset := config.offsets[-1]
	partitions := []int32{}
	if !hasDefaultOffset {
		for _, p := range allPartitions {
			_, ok := config.offsets[p]
			if ok {
				partitions = append(partitions, p)
			}
		}
	} else {
		partitions = allPartitions
	}

	return partitions
}
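consumeConfig is not defined in the excerpt above, so the sketch below assumes a minimal shape with just the fields findPartitions reads; the real configuration type may carry richer per-partition offset data. It illustrates the selection rule: explicit keys pick specific partitions, while a -1 key selects them all.

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

// Assumed minimal shape of consumeConfig, covering only what findPartitions uses.
type consumeConfig struct {
	topic   string
	offsets map[int32]int64 // keyed by partition ID; a -1 key means "apply a default offset to all partitions"
}

func main() {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalf("failed to create consumer: %v", err)
	}
	defer consumer.Close()

	// Explicit keys for partitions 0 and 3 restrict consumption to those two;
	// adding a -1 key instead would select every partition of the topic.
	// findPartitions only inspects the keys, not the offset values.
	cfg := consumeConfig{
		topic: "events",
		offsets: map[int32]int64{
			0: sarama.OffsetOldest,
			3: sarama.OffsetOldest,
		},
	}
	fmt.Println("selected partitions:", findPartitions(consumer, cfg))
}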