コード例 #1
0
ファイル: exe_webhook.go プロジェクト: funkygao/gafka
// Run drives the webhook executor: it validates the topic's app metadata,
// joins the kafka consumer group, starts the pump goroutine and then
// shuttles consumed messages to msgCh until stopper fires.
func (this *WebhookExecutor) Run() {
	// TODO watch the znode change, its endpoint might change any time
	if len(this.endpoints) == 0 {
		log.Warn("%s disabled webhook: empty endpoints", this.topic)
		return
	}

	this.appid = manager.Default.TopicAppid(this.topic)
	if this.appid == "" {
		log.Warn("invalid topic: %s", this.topic)
		return
	}

	// a missing signature is logged but not fatal
	this.appSignature = manager.Default.Signature(this.appid)
	if this.appSignature == "" {
		log.Warn("%s/%s invalid app signature", this.topic, this.appid)
	}

	config := consumergroup.NewConfig()
	config.Net.DialTimeout = time.Second * 10
	config.Net.WriteTimeout = time.Second * 10
	config.Net.ReadTimeout = time.Second * 10
	config.ChannelBufferSize = 100
	config.Consumer.Return.Errors = true
	config.Consumer.MaxProcessingTime = time.Second * 2 // chan recv timeout
	config.Zookeeper.Chroot = meta.Default.ZkChroot(this.cluster)
	config.Zookeeper.Timeout = zk.DefaultZkSessionTimeout()
	config.Offsets.CommitInterval = time.Minute
	config.Offsets.ProcessingTimeout = time.Second
	config.Offsets.ResetOffsets = false
	config.Offsets.Initial = sarama.OffsetOldest

	cg, err := consumergroup.JoinConsumerGroup(groupName, []string{this.topic}, meta.Default.ZkAddrs(), config)
	if err != nil {
		log.Error("%s stopped: %s", this.topic, err)
		return
	}
	this.fetcher = cg

	// a single pump goroutine drains msgCh
	var wg sync.WaitGroup
	wg.Add(1)
	go this.pump(&wg)

	for {
		select {
		case <-this.stopper:
			log.Debug("%s stopping", this.topic)
			wg.Wait()
			return

		case e := <-cg.Errors():
			log.Error("%s %s", this.topic, e)
			// TODO

		case m := <-cg.Messages():
			this.msgCh <- m
		}
	}
}
コード例 #2
0
ファイル: submanager.go プロジェクト: funkygao/gafka
// PickConsumerGroup returns the consumer group serving remoteAddr,
// creating and caching one on first use. Unless permitStandby is set,
// creation is refused when the group already has as many online
// consumers as the topic has partitions (a non-strict pre-check; the
// consumer group itself is the final barrier).
func (this *subManager) PickConsumerGroup(cluster, topic, group, remoteAddr, realIp string,
	resetOffset string, permitStandby bool) (cg *consumergroup.ConsumerGroup, err error) {
	// fast path: reuse the consumer group cached for this client
	var found bool
	this.clientMapLock.RLock()
	cg, found = this.clientMap[remoteAddr]
	this.clientMapLock.RUnlock()
	if found {
		return
	}

	if !permitStandby {
		// ensure concurrent sub threads didn't exceed partition count
		// the 1st non-strict barrier, consumer group is the final barrier
		numPartitions := len(meta.Default.TopicPartitions(cluster, topic))
		if numPartitions == 0 {
			return nil, store.ErrInvalidTopic
		}

		numOnline, e := meta.Default.OnlineConsumersCount(cluster, topic, group)
		if e != nil {
			return nil, e
		}
		if numOnline >= numPartitions {
			return nil, store.ErrTooManyConsumers
		}
	}

	this.clientMapLock.Lock()
	defer this.clientMapLock.Unlock()

	// double check lock: another goroutine may have created it meanwhile
	if cg, found = this.clientMap[remoteAddr]; found {
		return
	}

	// cache miss: build a dedicated consumer group for this client
	config := consumergroup.NewConfig()
	config.PermitStandby = permitStandby
	config.OneToOne = true

	config.Net.DialTimeout = time.Second * 10
	config.Net.WriteTimeout = time.Second * 10
	config.Net.ReadTimeout = time.Second * 10

	// kafka Fetch already batched into MessageSet,
	// this chan buf size influence on throughput is ignoreable
	config.ChannelBufferSize = 0
	// kafka Fetch MaxWaitTime 250ms, MinByte=1 by default

	config.Consumer.Return.Errors = true
	config.Consumer.MaxProcessingTime = time.Second * 2 // chan recv timeout
	config.Zookeeper.Chroot = meta.Default.ZkChroot(cluster)
	config.Zookeeper.Timeout = zk.DefaultZkSessionTimeout()
	config.Offsets.CommitInterval = time.Minute
	config.Offsets.ProcessingTimeout = time.Second

	// offset reset policy requested by the client
	if resetOffset == "newest" {
		config.Offsets.ResetOffsets = true
		config.Offsets.Initial = sarama.OffsetNewest
	} else if resetOffset == "oldest" {
		config.Offsets.ResetOffsets = true
		config.Offsets.Initial = sarama.OffsetOldest
	} else {
		config.Offsets.ResetOffsets = false
		config.Offsets.Initial = sarama.OffsetOldest
	}

	// runs in serial
	cg, err = consumergroup.JoinConsumerGroupRealIp(realIp, group, []string{topic},
		meta.Default.ZkAddrs(), config)
	if err == nil {
		this.clientMap[remoteAddr] = cg
	}

	return
}