Example #1
File: socket.go Project: jrossi/gollum
// Consume listens to a given socket.
func (cons *Socket) Consume(workers *sync.WaitGroup) {
	cons.AddMainWorker(workers)

	if cons.protocol == "udp" {
		go shared.DontPanic(cons.udpAccept)
		cons.SetFuseBurnedCallback(cons.closeConnection)
		defer cons.closeConnection()
	} else {
		go shared.DontPanic(cons.tcpAccept)
		cons.SetFuseBurnedCallback(cons.closeTCPConnection)
		defer cons.closeTCPConnection()
	}

	cons.ControlLoop()
}
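
All of the examples on this page wrap their worker functions in shared.DontPanic before spawning them as goroutines, so a panic inside one worker cannot crash the whole process. As a rough sketch, assuming the helper does nothing more than recover and log (the actual gollum implementation may differ in its logging details), it would look like this:

package shared

import (
	"log"
	"runtime/debug"
)

// DontPanic runs callback and converts a panic into a logged error,
// so a goroutine launched with `go DontPanic(f)` cannot crash the process.
// Minimal sketch only; the real gollum helper may log differently.
func DontPanic(callback func()) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("Recovered panic: %v\n%s", r, debug.Stack())
		}
	}()
	callback()
}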
Example #2
// Flush writes the content of the buffer to a given resource and resets the
// internal state, i.e. the buffer is empty after a call to Flush.
// Writing will be done in a separate goroutine to be non-blocking.
//
// The validate callback will be called after messages have been successfully
// written to the io.Writer.
// If validate returns false the buffer will not be reset (automatic retry).
// If validate is nil a return value of true is assumed (buffer reset).
//
// The onError callback will be called if the io.Writer returned an error.
// If onError returns false the buffer will not be reset (automatic retry).
// If onError is nil a return value of true is assumed (buffer reset).
func (batch *MessageBatch) Flush(assemble AssemblyFunc) {
	if batch.IsEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time
	batch.flushing.IncWhenDone()

	// Switch the buffers so writers can go on writing
	flushSet := atomic.SwapUint32(&batch.activeSet, (batch.activeSet&messageBatchIndexMask)^messageBatchIndexMask)

	flushIdx := flushSet >> messageBatchIndexShift
	writerCount := flushSet & messageBatchCountMask
	flushQueue := &batch.queue[flushIdx]
	spin := shared.NewSpinner(shared.SpinPriorityHigh)

	// Wait for remaining writers to finish
	for writerCount != atomic.LoadUint32(&flushQueue.doneCount) {
		spin.Yield()
	}

	// Write data and reset buffer asynchronously
	go shared.DontPanic(func() {
		defer batch.flushing.Done()

		messageCount := shared.MinI(int(writerCount), len(flushQueue.messages))
		assemble(flushQueue.messages[:messageCount])
		atomic.StoreUint32(&flushQueue.doneCount, 0)
		batch.Touch()
	})
}
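
The swap above works because activeSet packs two values into one uint32: the index of the active queue in the top bit and the number of writers that reserved a slot in the low bits, so a single atomic swap both flips the buffer and resets the count. A runnable sketch of that bit layout, using assumed values for the messageBatch* constants (the real definitions live in gollum's core package and may differ):

package main

import "fmt"

// Assumed values mirroring the messageBatchIndex*/CountMask constants above;
// purely illustrative.
const (
	indexShift = 31
	indexMask  = uint32(1) << indexShift // top bit selects queue 0 or 1
	countMask  = indexMask - 1           // low bits hold the writer count
)

func main() {
	set := uint32(1)<<indexShift | 42 // queue 1 active, 42 slots reserved

	fmt.Println("queue:  ", set>>indexShift) // 1
	fmt.Println("writers:", set&countMask)   // 42

	// Flipping the index bit while dropping the count switches buffers,
	// which is what the atomic.SwapUint32 call in Flush does in one step.
	next := (set & indexMask) ^ indexMask
	fmt.Println("next queue:", next>>indexShift) // 0
}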
Example #3
// Consume reads from a given file.
func (cons *File) Consume(workers *sync.WaitGroup) {
	cons.setState(fileStateOpen)
	defer cons.setState(fileStateDone)

	go shared.DontPanic(func() {
		cons.AddMainWorker(workers)
		cons.read()
	})

	cons.ControlLoop()
}
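
Note that, unlike example #1, AddMainWorker is called inside the spawned goroutine here. Assuming AddMainWorker ultimately calls Add on the passed WaitGroup, a Wait that runs before the goroutine is scheduled could return too early; the conventional ordering registers the worker first. A minimal illustration with a plain sync.WaitGroup:

package main

import "sync"

func main() {
	var wg sync.WaitGroup

	// Register before launching: if Add ran inside the goroutine, a Wait
	// executed before the goroutine is scheduled could return too early.
	wg.Add(1)
	go func() {
		defer wg.Done()
		// ... read loop would run here ...
	}()

	wg.Wait()
}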
Example #4
// Main fetch loop for Kafka events
func (cons *Kafka) readFromPartition(partitionID int32) {
	partCons, err := cons.consumer.ConsumePartition(cons.topic, partitionID, cons.offsets[partitionID])
	if err != nil {
		if !cons.client.Closed() {
			go shared.DontPanic(func() {
				cons.retry(partitionID, err)
			})
		}
		return // ### return, stop this consumer ###
	}

	// Make sure we wait for all consumers to end
	cons.AddWorker()
	defer func() {
		if !cons.client.Closed() {
			partCons.Close()
		}
		cons.WorkerDone()
	}()

	// Main consume loop, spinning until the client is closed
	spin := shared.NewSpinner(shared.SpinPriorityLow)

	for !cons.client.Closed() {
		cons.WaitOnFuse()
		select {
		case event := <-partCons.Messages():
			cons.offsets[partitionID] = event.Offset

			// Offset is always relative to the partition, so we create "buckets"
			// i.e. we are able to reconstruct partition and local offset from
			// the sequence number.
			//
			// To generate this we use:
			// seq = offset * numPartition + partitionId
			//
			// Reading can be done via:
			// seq % numPartition = partitionId
			// seq / numPartition = offset

			sequence := uint64(event.Offset*int64(cons.MaxPartitionID) + int64(partitionID))
			cons.Enqueue(event.Value, sequence)

		case err := <-partCons.Errors():
			Log.Error.Print("Kafka consumer error:", err)

		default:
			spin.Yield()
		}
	}
}
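
The bucket scheme described in the comment needs the total number of partitions as its multiplier, while the code above multiplies by cons.MaxPartitionID, the highest partition ID seen. Assuming numPartitions is the actual partition count, the encode/decode round trip from the comment works out like this:

package main

import "fmt"

func main() {
	const numPartitions = 4 // assumed count, purely for illustration
	offset, partition := int64(1250), int64(3)

	seq := offset*numPartitions + partition // encode: one bucket per partition

	fmt.Println(seq%numPartitions == partition) // true, recovers the partition
	fmt.Println(seq/numPartitions == offset)    // true, recovers the offset
}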
Example #5
// Consume listens to a given socket.
func (cons *Proxy) Consume(workers *sync.WaitGroup) {
	var err error

	if cons.listen, err = net.Listen(cons.protocol, cons.address); err != nil {
		Log.Error.Print("Proxy connection error: ", err)
		return
	}

	go shared.DontPanic(func() {
		cons.AddMainWorker(workers)
		cons.accept()
	})

	defer cons.listen.Close()
	cons.ControlLoop()
}
Example #6
// Start one consumer per partition as a goroutine
func (cons *Kafka) startConsumers() error {
	var err error

	cons.client, err = kafka.NewClient(cons.servers, cons.config)
	if err != nil {
		return err
	}

	cons.consumer, err = kafka.NewConsumerFromClient(cons.client)
	if err != nil {
		return err
	}

	partitions, err := cons.client.Partitions(cons.topic)
	if err != nil {
		return err
	}

	for _, partition := range partitions {
		if _, mapped := cons.offsets[partition]; !mapped {
			cons.offsets[partition] = cons.defaultOffset
		}
		if partition > cons.MaxPartitionID {
			cons.MaxPartitionID = partition
		}
	}

	for _, partition := range partitions {
		partition := partition // capture the loop variable for the closure below

		go shared.DontPanic(func() {
			cons.readFromPartition(partition)
		})
	}

	return nil
}
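
The shadowing statement partition := partition is what makes one goroutine per partition safe: before Go 1.22 a range loop reused a single loop variable, so closures launched without the copy would all observe its final value. A small self-contained demonstration:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for _, p := range []int32{0, 1, 2} {
		p := p // copy per iteration; required before Go 1.22, harmless after
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("consuming partition", p)
		}()
	}
	wg.Wait()
}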
Example #7
// Run the multiplexer.
// Fetch messages from the consumers and pass them to all producers.
func (plex multiplexer) run() {
	if len(plex.consumers) == 0 {
		Log.Error.Print("No consumers configured.")
		Log.SetWriter(os.Stdout)
		return // ### return, nothing to do ###
	}

	if len(plex.producers) == 0 {
		Log.Error.Print("No producers configured.")
		Log.SetWriter(os.Stdout)
		return // ### return, nothing to do ###
	}

	defer plex.shutdown()

	// Launch producers
	plex.state = multiplexerStateStartProducers
	for _, producer := range plex.producers {
		producer := producer
		Log.Debug.Print("Starting ", reflect.TypeOf(producer))
		go shared.DontPanic(func() {
			producer.Produce(plex.producerWorker)
		})
	}

	// If there are internal log listeners, switch to stream mode
	if core.StreamRegistry.IsStreamRegistered(core.LogInternalStreamID) {
		Log.Debug.Print("Binding log to ", reflect.TypeOf(plex.consumers[0]))
		Log.SetWriter(plex.consumers[0].(*core.LogConsumer))
	} else {
		Log.SetWriter(os.Stdout)
	}

	// Launch consumers
	plex.state = multiplexerStateStartConsumers
	for _, consumer := range plex.consumers {
		consumer := consumer
		Log.Debug.Print("Starting ", reflect.TypeOf(consumer))
		go shared.DontPanic(func() {
			consumer.Consume(plex.consumerWorker)
		})
	}

	// Main loop - wait for exit
	// Apache uses SIGUSR1 in some cases to signal child processes.
	// This signal is not available on Windows.

	plex.signal = newSignalHandler()

	Log.Note.Print("We be nice to them, if they be nice to us. (startup)")
	measure := time.Now()
	timer := time.NewTicker(5 * time.Second)

	for {
		select {
		case <-timer.C:
			duration := time.Since(measure)
			measure = time.Now()

			// Sampling values
			messageCount, droppedCount, discardedCount, filteredCount, noRouteCount := core.GetAndResetMessageCount()
			messageSec := float64(messageCount) / duration.Seconds()

			shared.Metric.SetF(metricMessagesSec, messageSec)
			shared.Metric.SetF(metricDroppedSec, float64(droppedCount)/duration.Seconds())
			shared.Metric.SetF(metricDiscardedSec, float64(discardedCount)/duration.Seconds())
			shared.Metric.SetF(metricFilteredSec, float64(filteredCount)/duration.Seconds())
			shared.Metric.SetF(metricNoRouteSec, float64(noRouteCount)/duration.Seconds())

			shared.Metric.Add(metricMessages, int64(messageCount))
			shared.Metric.Add(metricDropped, int64(droppedCount))
			shared.Metric.Add(metricDiscarded, int64(discardedCount))
			shared.Metric.Add(metricFiltered, int64(filteredCount))
			shared.Metric.Add(metricNoRoute, int64(noRouteCount))

			if plex.profile {
				Log.Note.Printf("Processed %.2f msg/sec", messageSec)
			}

			// Blocked producers
			numBlockedProducers := 0
			for _, prod := range plex.producers {
				if prod.IsBlocked() {
					numBlockedProducers++
				}
			}
			shared.Metric.SetI(metricBlockedProducers, numBlockedProducers)

		case sig := <-plex.signal:
			switch translateSignal(sig) {
			case signalExit:
				Log.Note.Print("Master betrayed us. Wicked. Tricksy, False. (signal)")
				plex.state = multiplexerStateShutdown
				return // ### return, exit requested ###

			case signalRoll:
				for _, consumer := range plex.consumers {
					consumer.Control() <- core.PluginControlRoll
				}
				for _, producer := range plex.producers {
					producer.Control() <- core.PluginControlRoll
				}

			default:
				// Ignore any other signals
			}
		}
	}
}
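
The ticker branch computes rates from the measured elapsed time rather than assuming exactly five seconds per tick, which keeps the msg/sec figures honest when a tick arrives late. The same sampling pattern in isolation, with a hypothetical counter standing in for core.GetAndResetMessageCount:

package main

import (
	"fmt"
	"time"
)

func main() {
	measure := time.Now()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for range ticker.C {
		duration := time.Since(measure)
		measure = time.Now()

		count := 12500 // hypothetical counter delta since the last tick
		fmt.Printf("%.2f msg/sec\n", float64(count)/duration.Seconds())
	}
}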
Example #8
// Consume starts a profile run and exits gollum when done
func (cons *Profiler) Consume(workers *sync.WaitGroup) {
	cons.AddMainWorker(workers)

	go shared.DontPanic(cons.profile)
	cons.ControlLoop()
}