func getTopicMetadata(t *testing.T, k *kafka.Cluster, topic string) *TopicMetadataResponse {
	b := broker.New(k.AnyBroker())
	defer b.Close()
	respMsg, err := Metadata(topic).Fetch(b)
	if err != nil {
		t.Fatal(err)
	}
	brokers := k.Brokers()
	for i := range brokers {
		if respMsg.Brokers[i].Addr() != brokers[i] {
			t.Fatalf("broker: expect %s but got %s", brokers[i], respMsg.Brokers[i].Addr())
		}
	}
	if len(respMsg.TopicMetadatas) != 1 {
		t.Fatalf("len(TopicMetadatas): expect 1 but got %d", len(respMsg.TopicMetadatas))
	}
	meta := &respMsg.TopicMetadatas[0]
	if meta.ErrorCode != NoError {
		t.Fatal(meta.ErrorCode)
	}
	if meta.TopicName != topic {
		t.Fatalf("topic: expect %s but got %s", topic, meta.TopicName)
	}
	return respMsg
}
func getTopicMetadata(t *testing.T, k *kafka.Cluster, topic string) *TopicMetadataResponse {
	conn, err := net.Dial("tcp", k.AnyBroker())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	req := &Request{
		CorrelationID:  rand.Int31(),
		RequestMessage: &TopicMetadataRequest{topic},
	}
	respMsg := &TopicMetadataResponse{}
	resp := &Response{ResponseMessage: respMsg}
	sendReceive(t, conn, req, resp)
	brokers := k.Brokers()
	for i := range brokers {
		if respMsg.Brokers[i].Addr() != brokers[i] {
			t.Fatalf("broker: expect %s but got %s", brokers[i], respMsg.Brokers[i].Addr())
		}
	}
	if len(respMsg.TopicMetadatas) != 1 {
		t.Fatalf("len(TopicMetadatas): expect 1 but got %d", len(respMsg.TopicMetadatas))
	}
	meta := &respMsg.TopicMetadatas[0]
	if meta.ErrorCode != NoError {
		t.Fatal(meta.ErrorCode)
	}
	if meta.TopicName != topic {
		t.Fatalf("topic: expect %s but got %s", topic, meta.TopicName)
	}
	return respMsg
}
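// A minimal sketch of how getTopicMetadata might be used from a test. The
// topic name "test-topic" and the existence of an already-running test
// cluster `k` are assumptions; only fields referenced above (Brokers, Addr)
// are used here.
func logTopicBrokers(t *testing.T, k *kafka.Cluster) {
	resp := getTopicMetadata(t, k, "test-topic")
	for _, b := range resp.Brokers {
		t.Logf("metadata broker: %s", b.Addr())
	}
}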
func getCoord(t *testing.T, k *kafka.Cluster, group string) string {
	reqMsg := GroupCoordinatorRequest(group)
	req := &Request{
		RequestMessage: &reqMsg,
	}
	respMsg := &GroupCoordinatorResponse{}
	resp := &Response{ResponseMessage: respMsg}
	conn, err := net.Dial("tcp", k.AnyBroker())
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	sendReceive(t, conn, req, resp)
	if respMsg.HasError() {
		t.Fatal(respMsg.ErrorCode)
	}
	return respMsg.Broker.Addr()
}
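// sendReceive is called by getTopicMetadata and getCoord above but is not
// defined in this section. Below is only a minimal sketch of what such a
// helper might look like, assuming Request exposes a Send method and
// Response a Receive method for the Kafka wire format on conn; those method
// names are assumptions, not confirmed by the code above.
func sendReceive(t *testing.T, conn net.Conn, req *Request, resp *Response) {
	if err := req.Send(conn); err != nil {
		t.Fatal(err)
	}
	if err := resp.Receive(conn); err != nil {
		t.Fatal(err)
	}
}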
func Dump(k *kafka.Cluster, topic string, newObj func() encoding.BinaryUnmarshaler) (string, error) {
	cl := cluster.New(broker.New, k.Brokers())
	partitions, err := cl.Partitions(topic)
	if err != nil {
		return "", err
	}
	cr := consumer.New(cl)
	var lines []string
	for _, partition := range partitions {
		start, err := cr.FetchOffsetByTime(topic, partition, proto.Earliest)
		if err != nil {
			return "", err
		}
		end, err := cr.FetchOffsetByTime(topic, partition, proto.Latest)
		if err != nil {
			return "", err
		}
		for offset := start; offset < end; {
			messages, err := cr.Consume(topic, partition, offset)
			if err != nil {
				return "", err
			}
			if len(messages) == 0 {
				break
			}
			for _, message := range messages {
				obj := newObj()
				if err := obj.UnmarshalBinary(message.Value); err != nil {
					return "", err
				}
				jsonBuf, err := json.MarshalIndent(obj, "", "\t")
				if err != nil {
					return "", err
				}
				lines = append(lines, string(jsonBuf))
			}
			// Advance to the offset just past the last fetched message;
			// message offsets are absolute within the partition.
			offset = messages[len(messages)-1].Offset + 1
		}
	}
	sort.Strings(lines)
	return strings.Join(lines, "\n"), nil
}
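// A minimal sketch of how Dump might be called from a test. The testEvent
// type, the "test-topic" name, and the JSON payload format are assumptions
// made for illustration; only Dump's signature comes from the code above.
type testEvent struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

// UnmarshalBinary decodes a JSON-encoded message value into the event.
func (e *testEvent) UnmarshalBinary(data []byte) error {
	return json.Unmarshal(data, e)
}

func dumpTestTopic(t *testing.T, k *kafka.Cluster) {
	out, err := Dump(k, "test-topic", func() encoding.BinaryUnmarshaler {
		return &testEvent{}
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Log(out)
}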