Example #1
// If a partition is claimed by another group member, then the `ClaimPartition`
// call blocks until it is released, unless the wait is canceled via the cancel
// channel, as this test verifies.
func (s *GroupRegistratorSuite) TestClaimPartitionCanceled(c *C) {
	// Given
	cfg := config.Default()
	gr1 := spawnGroupRegistrator("gr_test", "m1", cfg, s.kazooConn)
	defer gr1.stop()
	gr2 := spawnGroupRegistrator("gr_test", "m2", cfg, s.kazooConn)
	defer gr2.stop()
	cancelCh1 := make(chan none)
	cancelCh2 := make(chan none)
	wg := &sync.WaitGroup{}

	claim1 := gr1.claimPartition(s.cid, "foo", 1, cancelCh1)
	spawn(wg, func() {
		time.Sleep(300 * time.Millisecond)
		claim1()
	})

	// This goroutine cancels m2's claim before m1 releases the partition.
	spawn(wg, func() {
		time.Sleep(150 * time.Millisecond)
		close(cancelCh2)
	})

	// When: the call returns as soon as cancelCh2 is closed, while m1 still
	// holds the partition.
	claim2 := gr2.claimPartition(s.cid, "foo", 1, cancelCh2)
	defer claim2()

	// Then: the partition is still claimed by m1.
	owner, err := gr2.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "m1")

	// Wait for all test goroutines to stop.
	wg.Wait()
}
Example #2
func GenMessages(c *C, prefix, topic string, keys map[string]int) map[string][]*sarama.ProducerMessage {
	config := config.Default()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	producer, err := SpawnGracefulProducer(config)
	c.Assert(err, IsNil)

	messages := make(map[string][]*sarama.ProducerMessage)
	var wg sync.WaitGroup
	var lock sync.Mutex
	for key, count := range keys {
		for i := 0; i < count; i++ {
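			// Shadow the loop variable so that the goroutine below captures
			// its own copy of the key rather than the shared loop variable.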
			key := key
			message := fmt.Sprintf("%s:%s:%d", prefix, key, i)
			spawn(&wg, func() {
				keyEncoder := sarama.StringEncoder(key)
				msgEncoder := sarama.StringEncoder(message)
				prodMsg, err := producer.Produce(topic, keyEncoder, msgEncoder)
				c.Assert(err, IsNil)
				log.Infof("*** produced: topic=%s, partition=%d, offset=%d, message=%s",
					topic, prodMsg.Partition, prodMsg.Offset, message)
				lock.Lock()
				messages[key] = append(messages[key], prodMsg)
				lock.Unlock()
			})
		}
	}
	wg.Wait()
	// Sort the produced messages in ascending order of their offsets.
	for _, keyMessages := range messages {
		sort.Sort(MessageSlice(keyMessages))
	}
	return messages
}
Example #3
// If a group registrator resubscribes to the same list of topics, every group
// member gets a membership change notification identical to the previous one.
func (s *GroupRegistratorSuite) TestReSubscribe(c *C) {
	// Given
	cfg := config.Default()
	cfg.Consumer.RebalanceDelay = 100 * time.Millisecond

	gr1 := spawnGroupRegistrator("gr_test", "m1", cfg, s.kazooConn)
	defer gr1.stop()
	gr1.topics() <- []string{"foo", "bar"}

	gr2 := spawnGroupRegistrator("gr_test", "m2", cfg, s.kazooConn)
	defer gr2.stop()
	gr2.topics() <- []string{"bazz", "bar"}

	membership := map[string][]string{
		"m1": {"bar", "foo"},
		"m2": {"bar", "bazz"},
	}
	c.Assert(<-gr1.membershipChanges(), DeepEquals, membership)
	c.Assert(<-gr2.membershipChanges(), DeepEquals, membership)

	// When
	gr1.topics() <- []string{"foo", "bar"}

	// Then
	c.Assert(<-gr1.membershipChanges(), DeepEquals, membership)
	c.Assert(<-gr2.membershipChanges(), DeepEquals, membership)
}
Example #4
func (s *AdminSuite) SetUpSuite(c *C) {
	logging.InitTest()
	s.config = config.Default()
	s.config.ClientID = "producer"
	s.config.Kafka.SeedPeers = testKafkaPeers
	s.config.ZooKeeper.SeedPeers = testZookeeperPeers
}
Example #5
func (s *GroupConsumerSuite) TestResolvePartitions(c *C) {
	cfg := config.Default()
	cfg.ClientID = "c"
	gc := groupConsumer{
		cfg: cfg,
		fetchTopicPartitionsFn: func(topic string) ([]int32, error) {
			return map[string][]int32{
				"t1": {1, 2, 3, 4, 5},
				"t2": {1, 2},
				"t3": {1, 2, 3, 4, 5},
				"t4": {1, 2, 3},
				"t5": {1, 2, 3},
			}[topic], nil
		},
	}

	// When
	topicsToPartitions, err := gc.resolvePartitions(
		map[string][]string{
			"a": {"t1", "t2", "t3"},
			"b": {"t1", "t2", "t3"},
			"c": {"t1", "t2", "t4", "t5"},
			"d": {"t1", "t4"},
		})

	// Then
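	// Only partitions assigned to this member (ClientID "c") are expected:
	// t3 is absent because "c" does not subscribe to it, and t2's two
	// partitions are presumably assigned to other subscribers.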
	c.Assert(err, IsNil)
	c.Assert(topicsToPartitions, DeepEquals, map[string][]int32{
		"t1": {4},
		"t4": {1, 2},
		"t5": {1, 2, 3},
	})
}
Example #6
func (s *GracefulProducerSuite) SetUpTest(c *C) {
	s.deadMessageCh = make(chan *sarama.ProducerMessage, 100)
	s.config = config.Default()
	s.config.Kafka.SeedPeers = testKafkaPeers
	s.config.Producer.DeadMessageCh = s.deadMessageCh
	s.tkc = NewTestKafkaClient(s.config.Kafka.SeedPeers)
}
Example #7
func (s *ProducerSuite) SetUpTest(c *C) {
	s.deadMessageCh = make(chan *sarama.ProducerMessage, 100)
	s.cfg = config.Default()
	s.cfg.Kafka.SeedPeers = testhelpers.KafkaPeers
	s.cfg.Producer.DeadMessageCh = s.deadMessageCh
	s.kh = testhelpers.NewKafkaHelper(c)
}
Example #8
func (s *AdminSuite) SetUpSuite(c *C) {
	testhelpers.InitLogging(c)
	s.cfg = config.Default()
	s.cfg.ClientID = "producer"
	s.cfg.Kafka.SeedPeers = testhelpers.KafkaPeers
	s.cfg.ZooKeeper.SeedPeers = testhelpers.ZookeeperPeers
	s.kh = testhelpers.NewKafkaHelper(c)
}
Example #9
func (s *SmartConsumerSuite) SetUpSuite(c *C) {
	logging.InitTest()
	var err error
	config := config.Default()
	config.ClientID = "producer"
	config.Kafka.SeedPeers = testKafkaPeers
	config.ChannelBufferSize = 1
	s.producer, err = SpawnGracefulProducer(config)
	c.Assert(err, IsNil)
}
Example #10
func NewTestConfig(clientID string) *config.T {
	cfg := config.Default()
	cfg.UnixAddr = path.Join(os.TempDir(), "kafka-pixy.sock")
	cfg.ClientID = clientID
	cfg.Kafka.SeedPeers = KafkaPeers
	cfg.ZooKeeper.SeedPeers = ZookeeperPeers
	cfg.Consumer.LongPollingTimeout = 3000 * time.Millisecond
	cfg.Consumer.BackOffTimeout = 100 * time.Millisecond
	cfg.Consumer.RebalanceDelay = 100 * time.Millisecond
	return cfg
}
Example #11
// When a list of topics is sent to the `topics()` channel, a membership change
// notification is received with the same topics (in sorted order) under the
// registrator's member name.
func (s *GroupRegistratorSuite) TestSimpleSubscribe(c *C) {
	// Given
	cfg := config.Default()
	cfg.Consumer.RebalanceDelay = 200 * time.Millisecond
	gr := spawnGroupRegistrator("gr_test", "m1", cfg, s.kazooConn)
	defer gr.stop()

	// When
	gr.topics() <- []string{"foo", "bar"}

	// Then
	c.Assert(<-gr.membershipChanges(), DeepEquals,
		map[string][]string{"m1": {"bar", "foo"}})
}
Example #12
func (s *ConsumerGroupRegistrySuite) TestResubscribe(c *C) {
	// Given
	config := config.Default()
	config.Consumer.RebalanceDelay = 200 * time.Millisecond
	cgr := spawnConsumerGroupRegister("cgr_test", "m1", config, s.kazooConn)
	defer cgr.stop()
	cgr.topics() <- []string{"foo", "bar"}

	// When
	cgr.topics() <- []string{"blah", "bazz"}

	// Then
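	// The notification reflects only the latest subscription, with the
	// topic names in sorted order.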
	c.Assert(<-cgr.membershipChanges(), DeepEquals,
		map[string][]string{"m1": {"bazz", "blah"}})
}
Example #13
func (s *GroupConsumerSuite) TestResolvePartitionsEmpty(c *C) {
	cfg := config.Default()
	cfg.ClientID = "c"
	gc := groupConsumer{
		cfg: cfg,
		fetchTopicPartitionsFn: func(topic string) ([]int32, error) {
			return nil, nil
		},
	}

	// When
	topicsToPartitions, err := gc.resolvePartitions(nil)

	// Then
	c.Assert(err, IsNil)
	c.Assert(topicsToPartitions, DeepEquals, map[string][]int32{})
}
Example #14
func (s *GroupConsumerSuite) TestResolvePartitionsError(c *C) {
	cfg := config.Default()
	cfg.ClientID = "c"
	gc := groupConsumer{
		cfg: cfg,
		fetchTopicPartitionsFn: func(topic string) ([]int32, error) {
			return nil, errors.New("Kaboom!")
		},
	}

	// When
	topicsToPartitions, err := gc.resolvePartitions(map[string][]string{"c": {"t1"}})

	// Then
	c.Assert(err.Error(), Equals, "failed to get partition list: topic=t1, err=(Kaboom!)")
	c.Assert(topicsToPartitions, IsNil)
}
Example #15
// It is OK for the same group member to claim the same partition twice.
func (s *GroupRegistratorSuite) TestClaimPartitionTwice(c *C) {
	// Given
	cfg := config.Default()
	gr := spawnGroupRegistrator("gr_test", "m1", cfg, s.kazooConn)
	defer gr.stop()
	cancelCh := make(chan none)

	// When
	claim1 := gr.claimPartition(s.cid, "foo", 1, cancelCh)
	defer claim1()
	claim2 := gr.claimPartition(s.cid, "foo", 1, cancelCh)
	defer claim2()

	// Then
	owner, err := gr.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "m1")
}
Example #16
// If a partition has been claimed more than once, then it is released as soon
// as any of the claims is revoked.
func (s *GroupRegistratorSuite) TestReleasePartition(c *C) {
	// Given
	cfg := config.Default()
	gr := spawnGroupRegistrator("gr_test", "m1", cfg, s.kazooConn)
	defer gr.stop()
	cancelCh := make(chan none)
	claim1 := gr.claimPartition(s.cid, "foo", 1, cancelCh)
	claim2 := gr.claimPartition(s.cid, "foo", 1, cancelCh)

	// When
	claim2() // The second claim is revoked here, but it could have been either one.

	// Then
	owner, err := gr.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "")

	claim1()
}
Example #17
func (s *ConsumerGroupRegistrySuite) TestClaimPartition(c *C) {
	// Given
	config := config.Default()
	cgr := spawnConsumerGroupRegister("cgr_test", "m1", config, s.kazooConn)
	defer cgr.stop()
	cancelCh := make(chan none)

	owner, err := cgr.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "")

	// When
	claim1 := cgr.claimPartition(s.cid, "foo", 1, cancelCh)
	defer claim1()

	// Then
	owner, err = cgr.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "m1")
}
Example #18
func init() {
	cfg = config.Default()
	var kafkaPeers, zookeeperPeers string

	flag.StringVar(&cfg.TCPAddr, "tcpAddr", defaultTCPAddr, "TCP address that the HTTP API should listen on")
	flag.StringVar(&cfg.UnixAddr, "unixAddr", "", "Unix domain socket address that the HTTP API should listen on")
	flag.StringVar(&kafkaPeers, "kafkaPeers", defaultKafkaPeers, "Comma separated list of brokers")
	flag.StringVar(&zookeeperPeers, "zookeeperPeers", defaultZookeeperPeers, "Comma separated list of ZooKeeper nodes followed by optional chroot")
	flag.StringVar(&pidFile, "pidFile", "", "Path to the PID file")
	flag.StringVar(&loggingJSONCfg, "logging", defaultLoggingCfg, "Logging configuration")
	flag.Parse()

	cfg.Kafka.SeedPeers = strings.Split(kafkaPeers, ",")

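	// Everything from the first "/" onwards in the ZooKeeper peer list is
	// treated as an optional chroot path; the peer addresses precede it.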
	chrootStartIdx := strings.Index(zookeeperPeers, "/")
	if chrootStartIdx >= 0 {
		cfg.ZooKeeper.SeedPeers = strings.Split(zookeeperPeers[:chrootStartIdx], ",")
		cfg.ZooKeeper.Chroot = zookeeperPeers[chrootStartIdx:]
	} else {
		cfg.ZooKeeper.SeedPeers = strings.Split(zookeeperPeers, ",")
	}
}
Example #19
// If a consumer group member instance tries to acquire a partition that has
// already been acquired by another member, then it fails.
func (s *ConsumerGroupRegistrySuite) TestClaimPartitionClaimed(c *C) {
	// Given
	config := config.Default()
	cgr1 := spawnConsumerGroupRegister("cgr_test", "m1", config, s.kazooConn)
	defer cgr1.stop()
	cgr2 := spawnConsumerGroupRegister("cgr_test", "m2", config, s.kazooConn)
	defer cgr2.stop()
	cancelCh := make(chan none)
	close(cancelCh) // there will be no retries

	claim1 := cgr1.claimPartition(s.cid, "foo", 1, cancelCh)
	defer claim1()

	// When
	claim2 := cgr2.claimPartition(s.cid, "foo", 1, cancelCh)
	defer claim2()

	// Then
	owner, err := cgr1.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "m1")
}
Example #20
func ResetOffsets(c *C, group, topic string) {
	config := config.Default()
	config.Kafka.SeedPeers = testKafkaPeers
	config.ZooKeeper.SeedPeers = testZookeeperPeers

	kafkaClient, err := sarama.NewClient(config.Kafka.SeedPeers, config.SaramaConfig())
	c.Assert(err, IsNil)
	defer kafkaClient.Close()

	offsetManager, err := sarama.NewOffsetManagerFromClient(kafkaClient)
	c.Assert(err, IsNil)
	partitions, err := kafkaClient.Partitions(topic)
	c.Assert(err, IsNil)
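	// Fast-forward the group's committed offset for every partition of the
	// topic to the newest available offset, effectively skipping any
	// messages that have not been consumed yet.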
	for _, p := range partitions {
		offset, err := kafkaClient.GetOffset(topic, p, sarama.OffsetNewest)
		c.Assert(err, IsNil)
		pom, err := offsetManager.ManagePartition(group, topic, p)
		c.Assert(err, IsNil)
		pom.SubmitOffset(offset, "dummy")
		log.Infof("Set initial offset %s/%s/%d=%d", group, topic, p, offset)
		pom.Close()
	}
	offsetManager.Close()
}
Example #21
// If a partition is claimed by another group member, then the `ClaimPartition`
// call blocks until it is released.
func (s *GroupRegistratorSuite) TestClaimPartitionParallel(c *C) {
	// Given
	cfg := config.Default()
	gr1 := spawnGroupRegistrator("gr_test", "m1", cfg, s.kazooConn)
	defer gr1.stop()
	gr2 := spawnGroupRegistrator("gr_test", "m2", cfg, s.kazooConn)
	defer gr2.stop()
	cancelCh := make(chan none)

	claim1 := gr1.claimPartition(s.cid, "foo", 1, cancelCh)
	go func() {
		time.Sleep(300 * time.Millisecond)
		claim1()
	}()

	// When: block here until m1 releases the claim over foo/1.
	claim2 := gr2.claimPartition(s.cid, "foo", 1, cancelCh)
	defer claim2()

	// Then: the partition is claimed by m2.
	owner, err := gr2.partitionOwner("foo", 1)
	c.Assert(err, IsNil)
	c.Assert(owner, Equals, "m2")
}