Code example #1
File: housekeep.go Project: funkygao/gafka
func (q *queue) housekeeping() {
	defer func() {
		log.Trace("queue[%s] housekeeping done", q.ident())
		q.wg.Done()
	}()

	log.Trace("queue[%s] start housekeeping...", q.ident())

	purgeTick := time.NewTicker(q.purgeInterval)
	defer purgeTick.Stop()

	cursorChkpnt := time.NewTicker(time.Second)
	defer cursorChkpnt.Stop()

	for {
		select {
		case <-purgeTick.C:
			if err := q.Purge(); err != nil {
				log.Error("queue[%s] purge: %s", q.ident(), err)
			}

		case <-cursorChkpnt.C:
			if err := q.cursor.dump(); err != nil {
				log.Error("queue[%s] cursor checkpoint: %s", q.ident(), err)
			}

		case <-q.quit:
			return
		}
	}
}
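The housekeeping goroutine above is an instance of a common Go shutdown idiom: several tickers multiplexed with a quit channel in one select loop. A minimal, self-contained sketch of the same pattern (names are illustrative, not from gafka):

package main

import (
	"fmt"
	"time"
)

func worker(quit <-chan struct{}) {
	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop() // tickers must be stopped or they leak

	for {
		select {
		case <-tick.C:
			fmt.Println("periodic work")
		case <-quit:
			return // closing quit wakes every waiting receiver at once
		}
	}
}

func main() {
	quit := make(chan struct{})
	go worker(quit)
	time.Sleep(350 * time.Millisecond)
	close(quit)
}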
Code example #2
File: controller.go Project: funkygao/gafka
func (this *controller) RunForever() (err error) {
	log.Info("controller[%s] starting", this.Id())

	if err = this.orchestrator.RegisterActor(this.Id(), this.Bytes()); err != nil {
		return err
	}
	defer this.orchestrator.ResignActor(this.Id())

	if err = manager.Default.Start(); err != nil {
		return
	}
	log.Trace("manager[%s] started", manager.Default.Name())

	go this.runWebServer()

	jobDispatchQuit := make(chan struct{})
	go this.dispatchJobQueues(jobDispatchQuit)

	webhookDispatchQuit := make(chan struct{})
	go this.dispatchWebhooks(webhookDispatchQuit)

	select {
	case <-jobDispatchQuit:
		log.Warn("dispatchJobQueues quit")

	case <-webhookDispatchQuit:
		log.Warn("dispatchWebhooks quit")
	}

	manager.Default.Stop()
	log.Trace("manager[%s] stopped", manager.Default.Name())

	return
}
Code example #3
File: pubstore.go Project: funkygao/gafka
func (this *pubStore) watchDeadPartitions() {
	ticker := time.NewTicker(time.Minute * 2) // TODO
	defer ticker.Stop()

	var lastTopics = make(map[string]struct{})
	for {
		select {
		case <-ticker.C:
			deadPartitions := manager.Default.DeadPartitions()
			for topic, dp := range deadPartitions {
				this.markPartitionsDead(topic, dp)

				lastTopics[topic] = struct{}{}
			}

			for lastDeadTopic := range lastTopics {
				if _, present := deadPartitions[lastDeadTopic]; !present {
					// this topic was marked dead last round, but it has come back alive this round

					log.Trace("%s comes alive again", lastDeadTopic)

					this.markPartitionsDead(lastDeadTopic, nil)
					delete(lastTopics, lastDeadTopic)
				}
			}

		case <-this.shutdownCh:
			return
		}
	}
}
Code example #4
File: substore.go Project: funkygao/gafka
func (this *subStore) Start() (err error) {
	this.subManager = newSubManager()

	this.wg.Add(1)
	go func() {
		defer this.wg.Done()

		var remoteAddr string
		for {
			select {
			case <-this.shutdownCh:
				log.Trace("sub store[%s] stopped", this.Name())
				return

			case remoteAddr = <-this.closedConnCh:
				this.wg.Add(1)
				go func(id string) {
					this.subManager.killClient(id)
					this.wg.Done()
				}(remoteAddr)
			}
		}
	}()

	return
}
Code example #5
File: pubstore.go Project: funkygao/gafka
func (this *pubStore) Start() (err error) {
	if ctx.KafkaHome() == "" {
		return fmt.Errorf("empty kafka_home in ~/.gafka.cf")
	}
	if !gio.DirExists(ctx.KafkaHome()) {
		return fmt.Errorf("kafka not installed in %s, run 'gk deploy -kfkonly'", ctx.KafkaHome())
	}

	// warmup: create pools according to the current kafka topology
	for _, cluster := range meta.Default.ClusterNames() {
		this.pubPools[cluster] = newPubPool(this, cluster,
			meta.Default.BrokerList(cluster), this.pubPoolsCapcity)
	}

	this.wg.Add(1)
	go func() {
		defer this.wg.Done()

		for {
			select {
			case <-meta.Default.RefreshEvent():
				this.doRefresh()

			case <-this.shutdownCh:
				log.Trace("pub store[%s] stopped", this.Name())
				return
			}
		}
	}()

	return
}
Code example #6
File: file.go Project: funkygao/gafka
func (f *File) Attr(ctx context.Context, o *fuse.Attr) error {
	f.RLock()
	defer f.RUnlock()

	*o = f.attr

	// calculate size
	if !f.opened {
		if err := f.dir.reconnectKafkaIfNecessary(); err != nil {
			return err
		}

		latestOffset, err := f.dir.GetOffset(f.topic, f.partitionId, sarama.OffsetNewest)
		if err != nil {
			log.Error(err)

			return err
		}
		oldestOffset, err := f.dir.GetOffset(f.topic, f.partitionId, sarama.OffsetOldest)
		if err != nil {
			log.Error(err)

			return err
		}

		o.Size = uint64(latestOffset - oldestOffset)
	} else {
		o.Size = uint64(len(f.content))
	}

	log.Trace("File Attr, topic=%s, partitionId=%d, size=%d", f.topic, f.partitionId, o.Size)

	return nil
}
Code example #7
File: queue.go Project: funkygao/gafka
// Close stops the queue for reading and writing
func (q *queue) Close() error {
	close(q.quit)
	// wait for pump and housekeeping finish
	q.wg.Wait()

	q.mu.Lock()
	defer q.mu.Unlock()

	for _, s := range q.segments {
		if err := s.Close(); err != nil {
			return err
		}
	}

	q.head = nil
	q.tail = nil
	q.segments = nil

	log.Trace("queue[%s] dumping cursor", q.ident())
	if err := q.cursor.dump(); err != nil {
		return err
	}
	q.cursor = nil
	return nil
}
Code example #8
File: client.go Project: lucmichalski/fae
func (this *Client) Warmup() {
	var (
		sess *mgo.Session
		err  error
		t1   = time.Now()
	)
	for retries := 0; retries < 3; retries++ {
		for _, server := range this.selector.ServerList() {
			sess, err = this.getConn(server.Uri())
			if err != nil {
				log.Error("Warmup %v fail: %s", server.Uri(), err)
				break
			} else {
				this.putFreeConn(server.Uri(), sess)
			}
		}

		if err == nil {
			break
		}
	}

	if err == nil {
		log.Trace("Mongodb warmup within %s: %+v",
			time.Since(t1), this.freeconns)
	} else {
		log.Error("Mongodb failed to warmup within %s: %s",
			time.Since(t1), err)
	}
}
Code example #9
File: prof.go Project: lucmichalski/fae
func (this *profiler) do(callName string, ctx *rpc.Context, format string,
	args ...interface{}) {
	if this == nil {
		return
	}

	elapsed := time.Since(this.t1)
	slow := elapsed > config.Engine.Servants.CallSlowThreshold
	if !(slow || this.on) {
		return
	}

	body := fmt.Sprintf(format, args...)
	if slow {
		svtStats.incCallSlow()

		header := fmt.Sprintf("SLOW=%s/%s Q=%s ",
			elapsed, time.Since(this.t0), callName)
		log.Warn(header + this.truncatedStr(body))
	} else if this.on {
		header := fmt.Sprintf("T=%s/%s Q=%s ",
			elapsed, time.Since(this.t0), callName)
		log.Trace(header + this.truncatedStr(body))
	}
}
Code example #10
File: message.go Project: funkygao/gafka
// NewMessage is the supported way to obtain a new Message.  This makes
// use of a "slab allocator" which greatly reduces the load on the
// garbage collector.
func NewMessage(size int) *Message {
	var ch chan *Message
	for _, slabClass := range messagePool { // TODO binary search
		if size <= slabClass.maxSize {
			ch = slabClass.ch
			size = slabClass.maxSize
			break
		}
	}

	var msg *Message
	// if size exceeds every slab class, ch is still nil; a receive from a
	// nil channel is never ready, so the default branch below allocates
	select {
	case msg = <-ch:
	default:
		// message pool empty:
		// too busy or size greater than largest slab class
		log.Trace("allocating message memory pool: %dB", size)

		msg = &Message{}
		msg.slabSize = size
		msg.bodyBuf = make([]byte, 0, msg.slabSize)
	}

	msg.Body = msg.bodyBuf
	return msg
}
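NewMessage only takes from the pool; a recycling counterpart would push messages back onto the channel of their slab class. The real Free is not shown in this listing, so the following is a hypothetical sketch built from the same names:

// FreeMessage is a hypothetical recycling counterpart to NewMessage.
func FreeMessage(msg *Message) {
	msg.Body = nil
	msg.bodyBuf = msg.bodyBuf[:0] // keep capacity, drop contents
	for _, slabClass := range messagePool {
		if msg.slabSize == slabClass.maxSize {
			select {
			case slabClass.ch <- msg: // pooled for reuse
			default: // pool channel full: let the GC reclaim it
			}
			return
		}
	}
	// oversized messages never matched a slab class and were never pooled
}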
Code example #11
File: pubclient.go Project: funkygao/gafka
// Close must be called before Recycle
func (this *syncProducerClient) Close() {
	log.Trace("cluster[%s] closing kafka sync client: %d", this.cluster, this.id)

	// will close the producer and the kafka tcp conn
	this.SyncProducer.Close()
	this.closed = true
}
Code example #12
File: file.go Project: funkygao/gafka
func (f *File) ReadAll(ctx context.Context) ([]byte, error) {
	f.RLock()
	defer f.RUnlock()

	log.Trace("File ReadAll, topic=%s, partitionId=%d", f.topic, f.partitionId)

	return f.content, nil
}
Code example #13
File: file.go Project: funkygao/gafka
func (f *File) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
	log.Trace("File Release, req=%#v, topic=%s, partitionId=%d", req,
		f.topic, f.partitionId)
	f.opened = false
	close(f.closeCh)
	f.content = make([]byte, 0, 16<<10)
	return f.consumer.Close()
}
Code example #14
File: server_sub.go Project: funkygao/gafka
func (this *subServer) waitExit(exit <-chan struct{}) {
	<-exit

	if this.httpServer != nil {
		// HTTP response will have "Connection: close"
		this.httpServer.SetKeepAlivesEnabled(false)

		// avoid new connections
		if err := this.httpListener.Close(); err != nil {
			log.Error(err.Error())
		}

		log.Trace("%s on %s listener closed", this.name, this.httpServer.Addr)
	}

	if this.httpsServer != nil {
		// HTTP response will have "Connection: close"
		this.httpsServer.SetKeepAlivesEnabled(false)

		// avoid new connections
		if err := this.httpsListener.Close(); err != nil {
			log.Error(err.Error())
		}

		log.Trace("%s on %s listener closed", this.name, this.httpsServer.Addr)
	}

	this.idleConnsLock.Lock()
	t := time.Now().Add(time.Millisecond * 100)
	for c := range this.idleConns {
		c.SetReadDeadline(t)
	}
	this.idleConnsLock.Unlock()

	if this.idleConnsWg.WaitTimeout(Options.SubTimeout) {
		log.Warn("%s waiting for all connected client close timeout: %s",
			this.name, Options.SubTimeout)
	}

	this.subMetrics.Flush()
	this.timer.Stop()

	this.gw.wg.Done()
	close(this.closed)
}
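waitExit hand-rolls a graceful stop: disable keep-alives, close the listeners, nudge idle connections with a short read deadline, then wait with a timeout. Since Go 1.8 the standard library covers most of these steps with http.Server.Shutdown; a sketch of that route (hypothetical helper, not gafka's code):

package server

import (
	"context"
	"log"
	"net/http"
	"time"
)

// gracefulStop drains srv within timeout using the stdlib machinery.
func gracefulStop(srv *http.Server, timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	srv.SetKeepAlivesEnabled(false) // responses now carry "Connection: close"
	// Shutdown stops accepting new connections and waits for active ones;
	// a non-nil error means the context expired before the drain finished.
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("graceful stop: %v", err)
	}
}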
Code example #15
File: pool.go Project: lucmichalski/fae
func (this *ClientPool) Warmup() {
	t1 := time.Now()
	for _, client := range this.clients {
		client.Warmup()
	}

	log.Trace("Memcache pool warmup within %s: %+v",
		time.Since(t1), this.FreeConnMap())
}
Code example #16
File: selector_standard.go Project: lucmichalski/fae
func (this *StandardServerSelector) KickLookupCache(pool string, hintId int) {
	if pool != this.conf.LookupPool || hintId == 0 {
		return
	}

	key := this.lookupCacheKey(pool, hintId)
	this.lookupCache.Del(key)
	log.Trace("lookupCache[%s] kicked", key)
}
Code example #17
File: pubfactory.go Project: funkygao/gafka
func (this *pubPool) asyncProducerFactory() (pool.Resource, error) {
	if len(this.brokerList) == 0 {
		return nil, store.ErrEmptyBrokers
	}

	apc := &asyncProducerClient{
		rp:      this.asyncPool,
		cluster: this.cluster,
		id:      atomic.AddUint64(&this.nextId, 1),
	}

	var err error
	t1 := time.Now()
	cf := sarama.NewConfig()
	cf.Net.DialTimeout = time.Second * 4
	cf.Net.ReadTimeout = time.Second * 4
	cf.Net.WriteTimeout = time.Second * 4

	cf.Metadata.RefreshFrequency = time.Minute * 10
	cf.Metadata.Retry.Max = 3
	cf.Metadata.Retry.Backoff = time.Millisecond * 10

	cf.Producer.Flush.Frequency = time.Second * 10 // TODO
	cf.Producer.Flush.Messages = 1000
	cf.Producer.Flush.MaxMessages = 0 // unlimited

	cf.Producer.RequiredAcks = sarama.NoResponse
	cf.Producer.Partitioner = NewExclusivePartitioner
	cf.Producer.Retry.Backoff = time.Millisecond * 10 // gk migrate will trigger this backoff
	cf.Producer.Retry.Max = 3
	if this.store.compress {
		cf.Producer.Compression = sarama.CompressionSnappy
	}

	cf.ClientID = this.store.hostname

	apc.AsyncProducer, err = sarama.NewAsyncProducer(this.brokerList, cf)
	if err != nil {
		return nil, err
	}

	log.Trace("cluster[%s] kafka async producer connected[%d]: %+v %s",
		this.cluster, apc.id, this.brokerList, time.Since(t1))

	// TODO
	go func() {
		// messages will only be returned here after all retry attempts are exhausted.
		for err := range apc.Errors() {
			log.Error("cluster[%s] kafka async producer: %v", this.cluster, err)
		}
	}()

	return apc, nil
}
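Note that the factory leaves cf.Producer.Return.Successes at its default (false) and drains only Errors(); if you enable success returns you must drain Successes() as well or the producer eventually blocks. Feeding the producer is then a plain channel send (illustrative helper, not part of gafka):

// publish enqueues one message on the async producer created above.
func publish(p sarama.AsyncProducer, topic string, payload []byte) {
	p.Input() <- &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.ByteEncoder(payload),
	}
}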
Code example #18
File: file.go Project: funkygao/gafka
func (f *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	f.RLock()
	defer f.RUnlock()

	log.Trace("File Read, req=%#v, topic=%s, partitionId=%d", req,
		f.topic, f.partitionId)

	// clamp the requested window to the content actually buffered,
	// so a read past EOF cannot panic with an out-of-range slice
	off := int(req.Offset)
	if off > len(f.content) {
		off = len(f.content)
	}
	end := off + req.Size
	if end > len(f.content) {
		end = len(f.content)
	}
	resp.Data = f.content[off:end]
	return nil
}
Code example #19
File: file.go Project: funkygao/gafka
func (f *File) Open(ctx context.Context, req *fuse.OpenRequest,
	resp *fuse.OpenResponse) (fs.Handle, error) {
	log.Trace("File Open, req=%#v, topic=%s, partitionId=%d", req,
		f.topic, f.partitionId)

	// Allow kernel to use buffer cache
	resp.Flags &^= fuse.OpenDirectIO
	f.opened = true
	f.closeCh = make(chan struct{})

	go f.readContent()
	time.Sleep(time.Second * 2) // TODO

	return f, nil
}
Code example #20
File: dir.go Project: chendx79/gafka
// TODO when to close the kafka client?
func (d *Dir) reconnectKafkaIfNecessary() error {
	if d.Client != nil {
		return nil
	}

	kfk, err := sarama.NewClient(d.fs.zkcluster.BrokerList(), sarama.NewConfig())
	if err != nil {
		log.Error(err)

		return err
	}

	d.Client = kfk
	log.Trace("kafka %+v connected", d.fs.zkcluster.BrokerList())
	return nil
}
Code example #21
File: submanager.go Project: funkygao/gafka
func (this *subManager) Stop() {
	this.clientMapLock.Lock()
	defer this.clientMapLock.Unlock()

	var wg sync.WaitGroup
	for _, cg := range this.clientMap {
		wg.Add(1)
		go func(cg *consumergroup.ConsumerGroup) {
			cg.Close() // will commit inflight offsets
			wg.Done()
		}(cg)
	}

	wg.Wait()
	log.Trace("all consumer offsets committed")
}
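Passing cg to the goroutine as an argument is deliberate: before Go 1.22 a range variable was reused across iterations, so a closure capturing cg directly could see only the last consumer group. A self-contained demo of the safe form (illustrative names):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, it := range items {
		wg.Add(1)
		go func(it string) { // pass the loop variable in...
			defer wg.Done()
			fmt.Println(it) // ...so each goroutine owns its own copy
		}(it)
	}
	wg.Wait()
}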
Code example #22
File: zk.go Project: chendx79/gafka
func (this *zkreg) keepalive() {
	for {
		select {
		case <-this.shutdownCh:
			return

		case evt := <-this.zkzone.SessionEvents():
			// after zk conn lost, zklib will automatically reconnect
			// but the ephemeral znodes have to be recreated by ourselves
			if evt.State == zklib.StateHasSession {
				this.zkzone.CreateEphemeralZnode(this.mypath(), this.data)

				log.Trace("registered in zk: %s", this.mypath())
			}
		}
	}
}
Code example #23
File: segment.go Project: funkygao/gafka
func (s *segment) Remove() (err error) {
	if s.wfile == nil {
		return ErrSegmentNotOpen
	}

	path := s.wfile.Name()

	if err = s.Close(); err != nil {
		return
	}
	if err = os.Remove(path); err != nil {
		return
	}

	// log success only after the file is actually gone
	log.Trace("segment[%s] removed", path)
	return
}
Code example #24
File: rpc_server.go Project: lucmichalski/fae
func (this *TFunServer) handleSession(client thrift.TTransport) {
	var (
		calls           int64 // #calls within this session
		errs            int64 // #errs within this session
		t1              = time.Now()
		currentSessionN = atomic.AddInt64(&this.activeSessionN, 1)
		tcpClient       = client.(*thrift.TSocket).Conn().(*net.TCPConn)
		remoteAddr      = tcpClient.RemoteAddr().String()
		processor       = this.processorFactory.GetProcessor(client)
		inputTransport  = this.inputTransportFactory.GetTransport(client)
		outputTransport = this.outputTransportFactory.GetTransport(client)
		inputProtocol   = this.inputProtocolFactory.GetProtocol(inputTransport)
		outputProtocol  = this.outputProtocolFactory.GetProtocol(outputTransport)
	)

	atomic.AddInt64(&this.cumSessions, 1)
	log.Debug("session[%s]#%d open", remoteAddr, currentSessionN)

	if calls, errs = this.serveCalls(tcpClient, remoteAddr, processor,
		inputProtocol, outputProtocol); errs > 0 {
		atomic.AddInt64(&this.cumCallErrs, errs)
	}
	atomic.AddInt64(&this.cumCalls, calls)

	// server actively closes the socket
	if inputTransport != nil {
		inputTransport.Close()
	}
	if outputTransport != nil {
		outputTransport.Close()
	}

	currentSessionN = atomic.AddInt64(&this.activeSessionN, -1) + 1 // count as it was while this session ran
	elapsed := time.Since(t1)

	if errs > 0 {
		log.Warn("session[%s]#%d %d calls in %s, errs:%d", remoteAddr,
			currentSessionN, calls, elapsed, errs)
	} else {
		log.Trace("session[%s]#%d %d calls in %s", remoteAddr,
			currentSessionN, calls, elapsed)
	}
}
Code example #25
File: monitor.go Project: lucmichalski/fae
func watchFaes() {
	ch := make(chan []string, 10)
	go etclib.WatchService(etclib.SERVICE_FAE, ch)

	// a single-case select is just a receive; range until the channel
	// closes so the "died" log below is actually reachable
	for range ch {
		endpoints, err := etclib.ServiceEndpoints(etclib.SERVICE_FAE)
		if err == nil {
			log.Trace("fae endpoints updated: %+v", endpoints)

			dumpFaeConfigPhp(endpoints)
		} else {
			log.Error("fae: %s", err)
		}
	}

	log.Warn("fae watcher died")
}
Code example #26
File: file.go Project: funkygao/gafka
func (f *File) readContent() {
	if err := f.reconsume(sarama.OffsetOldest); err != nil {
		log.Error(err)
		return
	}

	var msg *sarama.ConsumerMessage
	for {
		select {
		case <-f.closeCh:
			log.Trace("File readContent quit, topic=%s, partitionId=%d",
				f.topic, f.partitionId)
			return

		case msg = <-f.consumer.Messages():
			f.content = append(f.content, msg.Value...)
			f.content = append(f.content, '\n')
		}
	}
}
Code example #27
File: jobqueues.go Project: funkygao/gafka
func (this *controller) invokeJobExecutor(jobQueue string, wg *sync.WaitGroup, stopper <-chan struct{}) {
	defer func() {
		wg.Done()
		this.JobExecutorN.Add(-1)
	}()

	var err error
	for retries := 0; retries < 3; retries++ {
		log.Trace("claiming owner of %s #%d", jobQueue, retries)
		if err = this.orchestrator.ClaimResource(this.Id(), zk.PubsubJobQueueOwners, jobQueue); err == nil {
			log.Info("claimed owner of %s", jobQueue)
			break
		} else if err == zk.ErrClaimedByOthers {
			log.Error("%s #%d", err, retries)
			time.Sleep(time.Second)
		} else {
			log.Error("%s #%d", err, retries)
			return
		}
	}

	if err != nil {
		// still err(ErrClaimedByOthers) encountered after max retries
		return
	}

	defer func(q string) {
		this.orchestrator.ReleaseResource(this.Id(), zk.PubsubJobQueueOwners, q)
		log.Info("de-claimed owner of %s", q)
	}(jobQueue)

	cluster, err := this.orchestrator.JobQueueCluster(jobQueue)
	if err != nil {
		// without a cluster the job executor cannot run meaningfully
		log.Error(err)
		return
	}

	exe := executor.NewJobExecutor(this.shortId, cluster, jobQueue, this.mc, stopper, this.auditor)
	exe.Run()
}
Code example #28
File: monitor.go Project: lucmichalski/fae
func watchMaintain() {
	const PATH = "/maintain"

	ch := make(chan []string, 10)
	go etclib.WatchChildren(PATH, ch)

	// same fix as watchFaes: range over the channel so the loop can end
	for range ch {
		kingdoms, err := etclib.Children(PATH)
		if err == nil {
			log.Trace("maintain kingdoms updated: %+v", kingdoms)

			dumpMaintainConfigPhp(kingdoms)
		} else {
			log.Error("maintain kingdom: %s", err)
		}
	}

	log.Warn("maintain watcher died")
}
Code example #29
File: webhooks.go Project: funkygao/gafka
func (this *controller) invokeWebhookExecutor(topic string, wg *sync.WaitGroup, stopper <-chan struct{}) {
	defer func() {
		wg.Done()
		this.WebhookExecutorN.Add(-1)
	}()

	hook, err := this.orchestrator.WebhookInfo(topic)
	if err != nil {
		log.Error("%s: %s", topic, err)
		return
	}

	for retries := 0; retries < 3; retries++ {
		log.Trace("claiming owner of %s #%d", topic, retries)
		if err = this.orchestrator.ClaimResource(this.Id(), zk.PubsubWebhookOwners, topic); err == nil {
			log.Info("claimed owner of %s", topic)
			break
		} else if err == zk.ErrClaimedByOthers {
			log.Error("%s #%d", err, retries)
			time.Sleep(time.Second)
		} else {
			log.Error("%s #%d", err, retries)
			return
		}
	}

	if err != nil {
		// still err(ErrClaimedByOthers) encountered after max retries
		return
	}

	defer func(topic string) {
		this.orchestrator.ReleaseResource(this.Id(), zk.PubsubWebhookOwners, topic)
		log.Info("de-claimed owner of %s", topic)
	}(topic)

	exe := executor.NewWebhookExecutor(this.shortId, hook.Cluster, topic, hook.Endpoints, stopper, this.auditor)
	exe.Run()
}
Code example #30
File: dir.go Project: chendx79/gafka
func (d *Dir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	d.RLock()
	defer d.RUnlock()

	log.Trace("Dir Lookup, name=%s", name)

	// split the name into topic and partitionId
	partitionOffset := -1
	for i := len(name) - 1; i > 0; i-- {
		if name[i] == '.' {
			partitionOffset = i
			break // stop at the last '.' so dotted topic names survive
		}
	}
	if partitionOffset == -1 {
		return nil, fuse.ENOENT
	}

	topic := name[:partitionOffset]
	partitionId, _ := strconv.Atoi(name[partitionOffset+1:])

	return d.fs.newFile(d, topic, int32(partitionId), os.FileMode(0555)), nil
}
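The backward scan above exists only to find the last '.'; the standard library expresses the same split directly, and checking the Atoi error rejects names with a non-numeric suffix. A sketch with illustrative names:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitTopicPartition splits "my.topic.3" into ("my.topic", 3, true).
func splitTopicPartition(name string) (string, int32, bool) {
	i := strings.LastIndexByte(name, '.')
	if i <= 0 {
		return "", 0, false
	}
	id, err := strconv.Atoi(name[i+1:])
	if err != nil {
		return "", 0, false
	}
	return name[:i], int32(id), true
}

func main() {
	fmt.Println(splitTopicPartition("my.topic.3"))
}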