Example #1
File: socket.go Project: oopcode/gollum
// Consume listens to a given socket.
func (cons *Socket) Consume(workers *sync.WaitGroup) {
	var err error
	var listen func()

	cons.quit = false

	if cons.protocol == "udp" {
		addr, _ := net.ResolveUDPAddr(cons.protocol, cons.address)
		if cons.listen, err = net.ListenUDP(cons.protocol, addr); err != nil {
			Log.Error.Print("Socket connection error: ", err)
			return
		}
		listen = cons.udpAccept
	} else {
		if cons.listen, err = net.Listen(cons.protocol, cons.address); err != nil {
			Log.Error.Print("Socket connection error: ", err)
			return
		}
		listen = cons.tcpAccept
	}

	go func() {
		defer shared.RecoverShutdown()
		cons.AddMainWorker(workers)
		listen()
	}()

	defer func() {
		cons.quit = true
		cons.listen.Close()
	}()

	cons.DefaultControlLoop(nil)
}
Example #2
File: file.go Project: oopcode/gollum
// Consume listens to stdin.
func (cons *File) Consume(workers *sync.WaitGroup) {
	cons.setState(fileStateOpen)
	defer cons.setState(fileStateDone)

	go func() {
		defer shared.RecoverShutdown()
		cons.AddMainWorker(workers)
		cons.read()
	}()

	cons.DefaultControlLoop(cons.onRoll)
}
Example #3
File: loopback.go Project: oopcode/gollum
// Consume is fetching and forwarding messages from the feedbackQueue
func (cons *LoopBack) Consume(workers *sync.WaitGroup) {
	cons.quit = false
	defer func() { cons.quit = true }()

	go func() {
		defer shared.RecoverShutdown()
		cons.AddMainWorker(workers)
		cons.processFeedbackQueue()
	}()

	cons.DefaultControlLoop(nil)
}
Example #4
File: kafka.go Project: aaukt/gollum
// Main fetch loop for kafka events
func (cons *Kafka) readFromPartition(partitionID int32) {
	partCons, err := cons.consumer.ConsumePartition(cons.topic, partitionID, cons.offsets[partitionID])
	if err != nil {
		if !cons.client.Closed() {
			go func() {
				defer shared.RecoverShutdown()
				cons.retry(partitionID, err)
			}()
		}
		return // ### return, stop this consumer ###
	}

	// Make sure we wait for all consumers to end

	cons.AddWorker()
	defer func() {
		if !cons.client.Closed() {
			partCons.Close()
		}
		cons.WorkerDone()
	}()

	// Worker loop
	for !cons.client.Closed() {
		select {
		case event := <-partCons.Messages():
			cons.offsets[partitionID] = event.Offset

			// Offset is always relative to the partition, so we create "buckets"
			// i.e. we are able to reconstruct partition and local offset from
			// the sequence number.
			//
			// To generate this we use:
			// seq = offset * numPartition + partitionId
			//
			// Reading can be done via:
			// seq % numPartition = partitionId
			// seq / numPartition = offset
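			//
			// As an illustration (numbers are hypothetical): with numPartition = 4,
			// an event at offset 10 on partition 2 encodes to seq = 10*4 + 2 = 42;
			// decoding gives 42 % 4 = 2 (partition) and 42 / 4 = 10 (offset).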

			sequence := uint64(event.Offset*int64(cons.MaxPartitionID) + int64(partitionID))
			cons.Enqueue(event.Value, sequence)

		case err := <-partCons.Errors():
			Log.Error.Print("Kafka consumer error:", err)

		default:
			runtime.Gosched()
		}
	}
}
Example #5
func listenToProxyClient(conn net.Conn, proxy *Proxy) {
	defer shared.RecoverShutdown()
	defer conn.Close()

	conn.SetDeadline(time.Time{})

	client := proxyClient{
		proxy:     proxy,
		conn:      conn,
		connected: true,
	}

	client.read()
}
Example #6
File: profiler.go Project: oopcode/gollum
// Consume starts a profile run and exits gollum when done
func (cons *Profiler) Consume(workers *sync.WaitGroup) {
	cons.quit = false
	cons.AddMainWorker(workers)

	go func() {
		defer shared.RecoverShutdown()
		cons.profile()
		cons.WorkerDone()
	}()

	defer func() {
		cons.quit = true
	}()

	cons.DefaultControlLoop(nil)
}
Example #7
// Flush writes the content of the buffer to a given resource and resets the
// internal state, i.e. the buffer is empty after a call to Flush.
// Writing will be done in a separate goroutine to be non-blocking.
//
// The validate callback will be called after messages have been successfully
// written to the io.Writer.
// If validate returns false the buffer will not be reset (automatic retry).
// If validate is nil a return value of true is assumed (buffer reset).
//
// The onError callback will be called if the io.Writer returned an error.
// If onError returns false the buffer will not be reset (automatic retry).
// If onError is nil a return value of true is assumed (buffer reset).
func (batch *MessageBatch) Flush(resource io.Writer, validate func() bool, onError func(error) bool) {
	if batch.IsEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time
	batch.flushing.Lock()

	// Switch the buffers so writers can go on writing
	// If a previous flush failed we need to continue where we stopped

	var flushSet uint32
	if batch.activeSet&0x80000000 != 0 {
		flushSet = atomic.SwapUint32(&batch.activeSet, 0|batch.queue[0].doneCount)
	} else {
		flushSet = atomic.SwapUint32(&batch.activeSet, 0x80000000|batch.queue[1].doneCount)
	}

	flushIdx := flushSet >> 31
	writerCount := flushSet & 0x7FFFFFFF
	flushQueue := &batch.queue[flushIdx]
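	// flushSet packs both pieces of information: the top bit is the index
	// of the buffer being flushed, the lower 31 bits are its writer count
	// (e.g. a hypothetical value of 0x80000003 decodes to buffer 1, 3 writers).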

	// Wait for remaining writers to finish
	for writerCount != flushQueue.doneCount {
		runtime.Gosched()
	}

	// Write data and reset buffer asynchronously
	go func() {
		defer shared.RecoverShutdown()
		defer batch.flushing.Unlock()

		_, err := resource.Write(flushQueue.buffer[:flushQueue.contentLen])

		if err == nil {
			if validate == nil || validate() {
				flushQueue.reset()
			}
		} else {
			if onError == nil || onError(err) {
				flushQueue.reset()
			}
		}

		batch.Touch()
	}()
}
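A minimal sketch of how the validate/onError contract described above might be wired up, assuming a populated *MessageBatch and an *os.File as the io.Writer; the flushToFile helper and its error handling are hypothetical:

// flushToFile is a hypothetical caller of Flush: it writes the batch to a
// file, keeps the buffer for a retry if the write fails, and only resets it
// once the data has been synced to disk.
func flushToFile(batch *MessageBatch, file *os.File) {
	batch.Flush(file,
		func() bool {
			// validate: reset the buffer only after a successful fsync
			return file.Sync() == nil
		},
		func(err error) bool {
			// onError: log and keep the buffer so the next flush retries
			Log.Error.Print("Flush failed: ", err)
			return false
		})
}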
Example #8
File: socket.go Project: oopcode/gollum
func (cons *Socket) tcpAccept() {
	defer cons.WorkerDone()

	listener := cons.listen.(net.Listener)
	for !cons.quit {
		client, err := listener.Accept()
		if err != nil {
			if !cons.quit {
				Log.Error.Print("Socket listen failed: ", err)
			}
			break // ### break ###
		}

		go func() {
			defer shared.RecoverShutdown()
			cons.AddWorker()
			cons.readFromConnection(client)
		}()
	}
}
Example #9
File: kafka.go Project: aaukt/gollum
// Start one consumer per partition as a go routine
func (cons *Kafka) startConsumers() error {
	var err error

	cons.client, err = kafka.NewClient(cons.servers, cons.config)
	if err != nil {
		return err
	}

	cons.consumer, err = kafka.NewConsumerFromClient(cons.client)
	if err != nil {
		return err
	}

	partitions, err := cons.client.Partitions(cons.topic)
	if err != nil {
		return err
	}

	for _, partition := range partitions {
		if _, mapped := cons.offsets[partition]; !mapped {
			cons.offsets[partition] = cons.defaultOffset
		}
		if partition > cons.MaxPartitionID {
			cons.MaxPartitionID = partition
		}
	}

	for _, partition := range partitions {
		partition := partition
		if _, mapped := cons.offsets[partition]; !mapped {
			cons.offsets[partition] = cons.defaultOffset
		}

		go func() {
			defer shared.RecoverShutdown()
			cons.readFromPartition(partition)
		}()
	}

	return nil
}
Example #10
File: proxy.go Project: oopcode/gollum
// Consume listens to a given socket.
func (cons *Proxy) Consume(workers *sync.WaitGroup) {
	var err error
	cons.quit = false

	if cons.listen, err = net.Listen(cons.protocol, cons.address); err != nil {
		Log.Error.Print("Proxy connection error: ", err)
		return
	}

	go func() {
		defer shared.RecoverShutdown()
		cons.AddMainWorker(workers)
		cons.accept()
	}()

	defer func() {
		cons.quit = true
		cons.listen.Close()
	}()

	cons.DefaultControlLoop(nil)
}
Example #11
// Run the multiplexer.
// Fetch messages from the consumers and pass them to all producers.
func (plex multiplexer) run() {
	if len(plex.consumers) == 0 {
		Log.Error.Print("No consumers configured.")
		return // ### return, nothing to do ###
	}

	if len(plex.producers) == 0 {
		Log.Error.Print("No producers configured.")
		return // ### return, nothing to do ###
	}

	defer plex.shutdown()

	// Launch producers
	plex.state = multiplexerStateStartProducers
	for _, producer := range plex.producers {
		producer := producer
		go func() {
			defer shared.RecoverShutdown()
			producer.Produce(plex.producerWorker)
		}()
	}

	// If there are internal log listeners, switch to stream mode
	if core.StreamTypes.IsStreamRegistered(core.LogInternalStreamID) {
		Log.SetWriter(plex.consumers[0].(*core.LogConsumer))
	}

	// Launch consumers
	plex.state = multiplexerStateStartConsumers
	for _, consumer := range plex.consumers {
		consumer := consumer
		go func() {
			defer shared.RecoverShutdown()
			consumer.Consume(plex.consumerWorker)
		}()
	}

	// Main loop - wait for exit
	// Apache uses SIGUSR1 in some cases to signal child processes.
	// This signal is not available on Windows

	plex.signal = newSignalHandler()

	Log.Note.Print("We be nice to them, if they be nice to us. (startup)")
	measure := time.Now()
	timer := time.NewTicker(time.Duration(2) * time.Second)

	for {
		select {
		case <-timer.C:
			duration := time.Since(measure)
			measure = time.Now()

			// Sampling based values
			messageCount := core.GetAndResetMessageCount()
			value := float64(messageCount) / duration.Seconds()
			shared.Metric.SetF(metricMsgSec, value)
			shared.Metric.Add(metricMessages, int64(messageCount))

			if plex.profile {
				Log.Note.Printf("Processed %.2f msg/sec", value)
			}

			// Global values
			timeSinceStart := time.Since(shared.ProcessStartTime)
			if totalMessages, err := shared.Metric.Get(metricMessages); err == nil {
				value = float64(totalMessages) / timeSinceStart.Seconds()
				shared.Metric.SetF(metricMsgSecAvg, value)
			}

		case sig := <-plex.signal:
			switch translateSignal(sig) {
			case signalExit:
				Log.Note.Print("Master betrayed us. Wicked. Tricksy, False. (signal)")
				plex.state = multiplexerStateShutdown
				return // ### return, exit requested ###

			case signalRoll:
				for _, consumer := range plex.consumers {
					consumer.Control() <- core.PluginControlRoll
				}
				for _, producer := range plex.producers {
					producer.Control() <- core.PluginControlRoll
				}

			default:
			}
		}
	}
}