// consume creates one queue/stream pair per requested consumer thread,
// registers this consumer in ZooKeeper, and triggers the initial rebalance.
// topicCountMap maps topic name -> number of consumer threads for that topic;
// it must be non-nil. On success it returns the per-topic Kafka streams held
// by the load-balancer listener.
func (zc *zookeeperConsumerConnector) consume(topicCountMap map[string]int) (map[string][]*KafkaStream, error) {
	if topicCountMap == nil {
		return nil, errors.New("topicCountMap can't be nil")
	}
	// Total number of consumer threads requested across all topics.
	consumerThreadSum := 0
	for _, v := range topicCountMap {
		consumerThreadSum += v
	}
	// One unbuffered fetch queue plus its wrapping stream per consumer thread.
	queuesAndStreams := make([]QueuesAndStreamPairs, 0, consumerThreadSum)
	for i := 0; i < consumerThreadSum; i++ {
		queue := make(chan FetchedDataChunk)
		stream := &KafkaStream{queue: queue, consumerTimeoutMs: zc.config.ConsumerTimeoutMs, clientId: zc.config.ClientId}

		queuesAndStreams = append(queuesAndStreams, QueuesAndStreamPairs{queues: queue, stream: stream})
	}

	dirs := zkutils.CreateZKGroupDirs(zc.config.GroupID)
	if err := zc.registerConsumerInZK(dirs, zc.consumerIdString, topicCountMap); err != nil {
		return nil, err
	}
	// Wire listeners, pair thread IDs with queues/streams, and rebalance.
	if err := zc.reinitializeConsumer(constructTopicCount(zc.consumerIdString, topicCountMap), queuesAndStreams); err != nil {
		return nil, err
	}

	return zc.loadBalancerListener.kafkaMessageAndMetadataStreams, nil
}
// reinitializeConsumer (re)creates the ZooKeeper listeners if they do not
// exist yet, maps each (topic, consumerThreadId) pair to its fetch queue and
// stream, subscribes to registry/partition change events, and explicitly
// triggers a rebalance. It returns an error when the number of queue/stream
// pairs does not match the number of consumer thread IDs.
func (zc *zookeeperConsumerConnector) reinitializeConsumer(topicCount TopicCount, queuesAndStreams []QueuesAndStreamPairs) error {
	dirs := zkutils.CreateZKGroupDirs(zc.config.GroupID)

	// Listener for consumer and partition changes; created once and reused.
	if zc.loadBalancerListener == nil {
		topicStreamsMap := make(map[string][]*KafkaStream)
		zc.loadBalancerListener = &ZKRebalancerListener{
			group:                          zc.config.GroupID,
			consumerIdString:               zc.consumerIdString,
			kafkaMessageAndMetadataStreams: topicStreamsMap,
		}
	}

	// Create listener for session expired event if not exist yet.
	if zc.sessionExpirationListener == nil {
		zc.sessionExpirationListener = &ZKSessionExpireListener{
			dirs:                 dirs,
			consumerIdString:     zc.consumerIdString,
			topicCount:           topicCount,
			loadBalancerListener: zc.loadBalancerListener,
		}
	}

	// Create listener for topic partition change event if not exist yet.
	if zc.topicPartitionChangeListener == nil {
		zc.topicPartitionChangeListener = &ZKTopicPartitionChangeListener{loadBalancerListener: zc.loadBalancerListener}
	}

	topicStreamsMap := zc.loadBalancerListener.kafkaMessageAndMetadataStreams

	consumerThreadIdsPerTopic := topicCount.getConsumerThreadIdsPerTopic()

	// Only a static topic count carries caller-supplied queue/stream pairs;
	// any other TopicCount implementation leaves allQueuesAndStreams nil,
	// which the length check below rejects unless topicThreadIds is empty too.
	var allQueuesAndStreams []QueuesAndStreamPairs
	switch topicCount.(type) {
	case *staticTopicCount:
		allQueuesAndStreams = queuesAndStreams
	}

	// Flatten the per-topic thread IDs into (topic, threadId) pairs.
	topicThreadIds := make([]TopicThreadIdPairs, 0, len(allQueuesAndStreams))
	for topic, cThreadIds := range consumerThreadIdsPerTopic {
		for _, cid := range cThreadIds {
			topicThreadIds = append(topicThreadIds, TopicThreadIdPairs{topic: topic, consumerThreadId: cid})
		}
	}

	if len(allQueuesAndStreams) != len(topicThreadIds) {
		return fmt.Errorf("Mismatch between thread ID count (%d) and queue count (%d)", len(topicThreadIds), len(allQueuesAndStreams))
	}

	// Pair each thread ID with its queue, and group streams by topic.
	zc.topicThreadIdAndQueues = make(map[TopicThreadIdPairs]chan FetchedDataChunk, len(topicThreadIds))
	for i := 0; i < len(topicThreadIds); i++ {
		zc.topicThreadIdAndQueues[topicThreadIds[i]] = allQueuesAndStreams[i].queues
		v, ok := topicStreamsMap[topicThreadIds[i].topic]
		if !ok {
			v = make([]*KafkaStream, 0)
		}
		v = append(v, allQueuesAndStreams[i].stream)
		topicStreamsMap[topicThreadIds[i].topic] = v
	}
	// TODO: subscribeStateChanges
	//	// listener to consumer and partition changes
	//	zkClient.subscribeStateChanges(sessionExpirationListener)

	zc.zkClient.SubscribeChildChanges(dirs.ConsumerRegistryDir, zc.loadBalancerListener)

	for topic := range topicStreamsMap {
		topicPath := zkutils.BrokerTopicsPath + "/" + topic
		zc.zkClient.SubscribeDataChanges(topicPath, zc.topicPartitionChangeListener)
	}

	// Explicitly trigger load balancing for this consumer.
	zc.loadBalancerListener.syncedRebalance()
	return nil
}