Code example #1
File: messagebatch.go Project: pombredanne/gollum-1
// Flush writes the content of the buffer to a given resource and resets the
// internal state, i.e. the buffer is empty after a call to Flush.
// Writing is done in a separate goroutine so the call does not block.
//
// The assemble callback receives the flushed messages once all writers that
// were still active at the time of the buffer switch have finished.
func (batch *MessageBatch) Flush(assemble AssemblyFunc) {
	if batch.IsEmpty() {
		return // ### return, nothing to do ###
	}

	// Only one flush at a time
	batch.flushing.IncWhenDone()

	// Switch the buffers so writers can go on writing
	flushSet := atomic.SwapUint32(&batch.activeSet, (batch.activeSet&messageBatchIndexMask)^messageBatchIndexMask)

	flushIdx := flushSet >> messageBatchIndexShift
	writerCount := flushSet & messageBatchCountMask
	flushQueue := &batch.queue[flushIdx]
	spin := shared.NewSpinner(shared.SpinPriorityHigh)

	// Wait for remaining writers to finish
	for writerCount != atomic.LoadUint32(&flushQueue.doneCount) {
		spin.Yield()
	}

	// Write data and reset buffer asynchronously
	go shared.DontPanic(func() {
		defer batch.flushing.Done()

		messageCount := shared.MinI(int(writerCount), len(flushQueue.messages))
		assemble(flushQueue.messages[:messageCount])
		atomic.StoreUint32(&flushQueue.doneCount, 0)
		batch.Touch()
	})
}
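
The pattern above (atomically swap the active buffer index, spin until the in-flight writers have signalled completion, then flush asynchronously) can be sketched with the standard library alone. This is a minimal illustration, not gollum's MessageBatch: the bit layout, the fixed slot count, and runtime.Gosched in place of spin.Yield are all assumptions made for the sketch.

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// batch keeps two buffers: bit 31 of state selects the active one,
// bits 0..30 count the writers that reserved a slot in it. done counts
// the writers that have finished their write, per buffer.
type batch struct {
	state uint32
	done  [2]uint32
	slots [2][64]string // fixed capacity; overflow handling omitted
}

const idxShift = 31
const countMask = uint32(1)<<idxShift - 1

// push reserves a slot in the currently active buffer and fills it.
func (b *batch) push(s string) {
	state := atomic.AddUint32(&b.state, 1)
	idx := state >> idxShift
	b.slots[idx][(state&countMask)-1] = s
	atomic.AddUint32(&b.done[idx], 1) // signal that this write finished
}

// flush switches the active buffer, waits for pending writers and hands
// the filled slots to assemble, mirroring Flush above.
func (b *batch) flush(assemble func([]string)) {
	cur := atomic.LoadUint32(&b.state)
	old := atomic.SwapUint32(&b.state, ((cur>>idxShift)^1)<<idxShift)
	idx := old >> idxShift
	count := old & countMask
	for atomic.LoadUint32(&b.done[idx]) != count {
		runtime.Gosched() // stand-in for spin.Yield()
	}
	assemble(b.slots[idx][:count])
	atomic.StoreUint32(&b.done[idx], 0)
}

func main() {
	b := &batch{}
	b.push("hello")
	b.push("world")
	b.flush(func(msgs []string) { fmt.Println(msgs) }) // [hello world]
}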
Code example #2
File: message.go Project: jrossi/gollum
// Enqueue is a convenience function to push a message to a channel while
// waiting for a timeout instead of just blocking.
// A timeout of 0 always blocks until the message is sent.
// A negative timeout discards the message if it cannot be sent immediately.
// A positive timeout retries until it expires; the returned MessageState
// reports whether the message was sent, discarded, or timed out.
func (msg Message) Enqueue(channel chan<- Message, timeout time.Duration) MessageState {
	if timeout == 0 {
		channel <- msg
		return MessageStateOk // ### return, done ###
	}

	start := time.Time{}
	spin := shared.Spinner{}
	for {
		select {
		case channel <- msg:
			return MessageStateOk // ### return, done ###

		default:
			switch {
			// Start timeout based retries
			case start.IsZero():
				if timeout < 0 {
					return MessageStateDiscard // ### return, discard and ignore ###
				}
				start = time.Now()
				spin = shared.NewSpinner(shared.SpinPriorityHigh)

			// Discard message after timeout
			case time.Since(start) > timeout:
				return MessageStateTimeout // ### return, drop and retry ###

			// Yield and try again
			default:
				spin.Yield()
			}
		}
	}
}
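
The same timeout semantics can be expressed with the standard library only. A sketch, not gollum's Message API: the names are made up, and a timer-backed select replaces the spinner-based retry loop.

package main

import (
	"fmt"
	"time"
)

type sendResult int

const (
	sendOK sendResult = iota
	sendDiscarded
	sendTimedOut
)

// enqueue mirrors the semantics documented above: a timeout of 0 blocks,
// a negative timeout tries once and discards, a positive timeout waits
// at most that long.
func enqueue(ch chan<- string, msg string, timeout time.Duration) sendResult {
	switch {
	case timeout == 0:
		ch <- msg
		return sendOK
	case timeout < 0:
		select {
		case ch <- msg:
			return sendOK
		default:
			return sendDiscarded
		}
	default:
		timer := time.NewTimer(timeout)
		defer timer.Stop()
		select {
		case ch <- msg:
			return sendOK
		case <-timer.C:
			return sendTimedOut
		}
	}
}

func main() {
	ch := make(chan string, 1)
	fmt.Println(enqueue(ch, "a", 10*time.Millisecond)) // 0: delivered
	fmt.Println(enqueue(ch, "b", 10*time.Millisecond)) // 2: buffer full, timed out
}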
Code example #3
File: messagebatch.go Project: pombredanne/gollum-1
// AppendOrBlock works like Append but will block until Append returns true.
// If the batch was closed during this call, false is returned.
func (batch *MessageBatch) AppendOrBlock(msg Message) bool {
	spin := shared.NewSpinner(shared.SpinPriorityMedium)
	for !batch.IsClosed() {
		if batch.Append(msg) {
			return true // ### return, success ###
		}
		spin.Yield()
	}

	return false
}
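
The retry-until-closed shape of AppendOrBlock can be written generically with the standard library; a small sketch with made-up names, using runtime.Gosched in place of spin.Yield.

package batch

import (
	"runtime"
	"sync/atomic"
)

// appendOrBlock retries a non-blocking try() until it succeeds or the
// closed flag is set, yielding the processor between attempts.
func appendOrBlock(try func() bool, closed *uint32) bool {
	for atomic.LoadUint32(closed) == 0 {
		if try() {
			return true // success
		}
		runtime.Gosched()
	}
	return false // closed while waiting
}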
Code example #4
File: kafka.go Project: pombredanne/gollum-1
// Main fetch loop for Kafka events
func (cons *Kafka) readFromPartition(partitionID int32) {
	partCons, err := cons.consumer.ConsumePartition(cons.topic, partitionID, cons.offsets[partitionID])
	if err != nil {
		if !cons.client.Closed() {
			go shared.DontPanic(func() {
				cons.retry(partitionID, err)
			})
		}
		return // ### return, stop this consumer ###
	}

	// Make sure we wait for all consumers to end

	cons.AddWorker()
	defer func() {
		if !cons.client.Closed() {
			partCons.Close()
		}
		cons.WorkerDone()
	}()

	// Loop over worker
	spin := shared.NewSpinner(shared.SpinPriorityLow)

	for !cons.client.Closed() {
		cons.WaitOnFuse()
		select {
		case event := <-partCons.Messages():
			cons.offsets[partitionID] = event.Offset

			// Offset is always relative to the partition, so we create "buckets"
			// i.e. we are able to reconstruct the partition and local offset from
			// the sequence number.
			//
			// To generate this we use:
			// seq = offset * numPartition + partitionId
			//
			// Reading can be done via:
			// seq % numPartition = partitionId
			// seq / numPartition = offset

			sequence := uint64(event.Offset*int64(cons.MaxPartitionID) + int64(partitionID))
			cons.Enqueue(event.Value, sequence)

		case err := <-partCons.Errors():
			Log.Error.Print("Kafka consumer error:", err)

		default:
			spin.Yield()
		}
	}
}
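
The bucket arithmetic described in the comment can be verified with a tiny round-trip; a sketch with a made-up, fixed number of partitions.

package main

import "fmt"

// As in the comment above: seq = offset*numPartitions + partitionID,
// inverted with % and /. numPartitions is a made-up constant for the sketch.
const numPartitions = 8

func encode(partitionID, offset int64) int64 {
	return offset*numPartitions + partitionID
}

func decode(seq int64) (partitionID, offset int64) {
	return seq % numPartitions, seq / numPartitions
}

func main() {
	seq := encode(3, 42)
	partitionID, offset := decode(seq)
	fmt.Println(seq, partitionID, offset) // 339 3 42
}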
Code example #5
File: producer.go Project: jrossi/gollum
// WaitForDependencies waits until all dependencies reach the given runstate.
// A timeout > 0 can be given to work around possible blocking situations.
func (prod *ProducerBase) WaitForDependencies(waitForState PluginState, timeout time.Duration) {
	spinner := shared.NewSpinner(shared.SpinPriorityMedium)
	for _, dep := range prod.dependencies {
		start := time.Now()
		for dep.GetState() < waitForState {
			spinner.Yield()
			if timeout > 0 && time.Since(start) > timeout {
				Log.Warning.Printf("WaitForDependencies call timed out for %T", dep)
				break // ### break loop, timeout ###
			}
		}
	}
}
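
The same poll-with-deadline shape can be sketched with the standard library; an illustration with made-up names, where an atomic integer stands in for the plugin run state and runtime.Gosched for spinner.Yield.

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

// waitForState polls an atomically updated state until it reaches target
// or the timeout expires.
func waitForState(state *int32, target int32, timeout time.Duration) bool {
	start := time.Now()
	for atomic.LoadInt32(state) < target {
		if timeout > 0 && time.Since(start) > timeout {
			return false // timed out, give up like the Warning branch above
		}
		runtime.Gosched()
	}
	return true
}

func main() {
	var state int32
	go func() {
		time.Sleep(10 * time.Millisecond)
		atomic.StoreInt32(&state, 2)
	}()
	fmt.Println(waitForState(&state, 2, time.Second)) // true
}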
Code example #6
File: websocket.go Project: pombredanne/gollum-1
func (prod *Websocket) pushMessage(msg core.Message) {
	messageText, _ := prod.ProducerBase.Format(msg)

	if prod.clientIdx&0x7FFFFFFF > 0 {
		// There are new clients available
		currentIdx := prod.clientIdx >> 31
		activeIdx := (currentIdx + 1) & 1

		// Store away the current client list and reset it
		activeConns := &prod.clients[activeIdx]
		oldConns := activeConns.conns
		activeConns.conns = activeConns.conns[:0]
		activeConns.doneCount = 0

		// Switch new and current client list
		if currentIdx == 0 {
			currentIdx = atomic.SwapUint32(&prod.clientIdx, 1<<31)
		} else {
			currentIdx = atomic.SwapUint32(&prod.clientIdx, 0)
		}

		// Wait for new list writer to finish
		count := currentIdx & 0x7FFFFFFF
		currentIdx = currentIdx >> 31
		spin := shared.NewSpinner(shared.SpinPriorityHigh)

		for prod.clients[currentIdx].doneCount != count {
			spin.Yield()
		}

		// Add new connections to old connections
		newConns := &prod.clients[currentIdx]
		newConns.conns = append(oldConns, newConns.conns...)
	}

	// Process the active connections
	activeIdx := ((prod.clientIdx >> 31) + 1) & 1
	activeConns := &prod.clients[activeIdx]

	for i := 0; i < len(activeConns.conns); i++ {
		client := activeConns.conns[i]
		if _, err := client.Write(messageText); err != nil {
			activeConns.conns = append(activeConns.conns[:i], activeConns.conns[i+1:]...)
			if closeErr := client.Close(); closeErr == nil {
				Log.Error.Print("Websocket: ", err)
			}
			i--
		}
	}
}
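
The write-and-prune loop at the end (drop a client on write error, then step the index back because the following elements shifted left) is a generic slice pattern; a standalone sketch with stand-in io.Writer connections.

package main

import (
	"fmt"
	"io"
	"strings"
)

// broadcast writes payload to every connection and drops the ones that
// fail, using the same delete-while-iterating pattern as the loop above.
func broadcast(conns []io.Writer, payload []byte) []io.Writer {
	for i := 0; i < len(conns); i++ {
		if _, err := conns[i].Write(payload); err != nil {
			conns = append(conns[:i], conns[i+1:]...) // drop the failed connection
			i--                                       // re-check the element that moved into slot i
		}
	}
	return conns
}

type failing struct{}

func (failing) Write([]byte) (int, error) { return 0, io.ErrClosedPipe }

func main() {
	var ok strings.Builder
	conns := []io.Writer{&ok, failing{}}
	conns = broadcast(conns, []byte("hi"))
	fmt.Println(len(conns), ok.String()) // 1 hi
}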
Code example #7
File: filestate.go Project: pombredanne/gollum-1
func (state *fileState) compressAndCloseLog(sourceFile *os.File) {
	state.bgWriter.Add(1)
	defer state.bgWriter.Done()

	// Generate file to zip into
	sourceFileName := sourceFile.Name()
	sourceDir, sourceBase, _ := shared.SplitPath(sourceFileName)

	targetFileName := fmt.Sprintf("%s/%s.gz", sourceDir, sourceBase)

	targetFile, err := os.OpenFile(targetFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		Log.Error.Print("File compress error:", err)
		sourceFile.Close()
		return
	}

	// Create zipfile and compress data
	Log.Note.Print("Compressing " + sourceFileName)

	sourceFile.Seek(0, 0)
	targetWriter := gzip.NewWriter(targetFile)
	spin := shared.NewSpinner(shared.SpinPriorityHigh)

	for err == nil {
		_, err = io.CopyN(targetWriter, sourceFile, 1<<20) // 1 MB chunks
		spin.Yield()                                       // Be async!
	}

	// Cleanup
	sourceFile.Close()
	targetWriter.Close()
	targetFile.Close()

	if err != nil && err != io.EOF {
		Log.Warning.Print("Compression failed:", err)
		err = os.Remove(targetFileName)
		if err != nil {
			Log.Error.Print("Compressed file remove failed:", err)
		}
		return
	}

	// Remove original log
	err = os.Remove(sourceFileName)
	if err != nil {
		Log.Error.Print("Uncompressed file remove failed:", err)
	}
}
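
The chunked-compression loop maps directly onto the standard library; a simplified sketch (made-up helper name, no rotation or cleanup logic, runtime.Gosched in place of spin.Yield).

package main

import (
	"compress/gzip"
	"io"
	"log"
	"os"
	"runtime"
)

// gzipFile compresses src into src+".gz" in 1 MB chunks, yielding between
// chunks like the loop above.
func gzipFile(src string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.OpenFile(src+".gz", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return err
	}
	defer out.Close()

	zw := gzip.NewWriter(out)
	defer zw.Close()

	for {
		if _, err := io.CopyN(zw, in, 1<<20); err != nil {
			if err == io.EOF {
				return nil // whole file copied
			}
			return err
		}
		runtime.Gosched()
	}
}

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: gzipfile <path>")
	}
	if err := gzipFile(os.Args[1]); err != nil {
		log.Fatal(err)
	}
}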
Code example #8
File: consumer.go Project: jrossi/gollum
func (cons *ConsumerBase) fuseControlLoop() {
	if cons.fuse == nil {
		return // ### return, no fuse attached ###
	}
	spin := shared.NewSpinner(shared.SpinPrioritySuspend)
	for cons.IsActive() {
		// If the fuse is burned: callback, wait, callback
		if cons.IsFuseBurned() {
			cons.Control() <- PluginControlFuseBurn
			cons.WaitOnFuse()
			cons.Control() <- PluginControlFuseActive
		} else {
			spin.Yield()
		}
	}
}
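
The burn/recover notification shape can be sketched without gollum's fuse type; an illustration with made-up names, where atomic flags stand in for IsActive/IsFuseBurned and runtime.Gosched for spin.Yield and WaitOnFuse.

package consumer

import (
	"runtime"
	"sync/atomic"
)

// fuseLoop forwards burn/recover notifications on a control channel while
// the consumer is active, mirroring fuseControlLoop above.
func fuseLoop(active, burned *uint32, control chan<- string) {
	for atomic.LoadUint32(active) == 1 {
		if atomic.LoadUint32(burned) == 1 {
			control <- "fuse burned"
			for atomic.LoadUint32(burned) == 1 { // stand-in for WaitOnFuse
				runtime.Gosched()
			}
			control <- "fuse active"
		} else {
			runtime.Gosched()
		}
	}
}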
Code example #9
File: file.go Project: pombredanne/gollum-1
func (cons *File) read() {
	defer cons.close()

	sendFunction := cons.Enqueue
	if cons.offsetFileName != "" {
		sendFunction = cons.enqueueAndPersist
	}

	spin := shared.NewSpinner(shared.SpinPriorityLow)
	buffer := shared.NewBufferedReader(fileBufferGrowSize, 0, 0, cons.delimiter)
	printFileOpenError := true

	for cons.state != fileStateDone {

		// Initialize the seek state if requested
		// Try to read the remains of the file first
		if cons.state == fileStateOpen {
			if cons.file != nil {
				buffer.ReadAll(cons.file, sendFunction)
			}
			cons.initFile()
			buffer.Reset(uint64(cons.seekOffset))
		}

		// Try to open the file to read from
		if cons.state == fileStateRead && cons.file == nil {
			file, err := os.OpenFile(cons.realFileName(), os.O_RDONLY, 0666)

			switch {
			case err != nil:
				if printFileOpenError {
					Log.Error.Print("File open error - ", err)
					printFileOpenError = false
				}
				time.Sleep(3 * time.Second)
				continue // ### continue, retry ###

			default:
				cons.file = file
				cons.seekOffset, _ = cons.file.Seek(cons.seekOffset, cons.seek)
				printFileOpenError = true
			}
		}

		// Try to read from the file
		if cons.state == fileStateRead && cons.file != nil {
			err := buffer.ReadAll(cons.file, sendFunction)
			cons.WaitOnFuse()

			switch {
			case err == nil: // ok
				spin.Reset()
			case err == io.EOF:
				spin.Yield()
			case cons.state == fileStateRead:
				Log.Error.Print("Error reading file - ", err)
				cons.file.Close()
				cons.file = nil
			}
		}
	}
}
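
The follow-the-file behaviour at the core of this loop (keep reading, and at EOF wait and retry instead of giving up) can be sketched with the standard library; a much-simplified illustration that omits the delimiter handling, offset persistence, and log-rotation logic of the consumer above.

package main

import (
	"io"
	"log"
	"os"
	"time"
)

// follow reads a file in chunks and keeps polling at EOF, tail-style.
// A short sleep stands in for spin.Yield.
func follow(path string, send func([]byte)) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	buf := make([]byte, 4096)
	for {
		n, err := file.Read(buf)
		if n > 0 {
			send(buf[:n])
		}
		switch err {
		case nil:
			// keep reading
		case io.EOF:
			time.Sleep(100 * time.Millisecond) // wait for new data
		default:
			return err
		}
	}
}

func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: follow <path>")
	}
	if err := follow(os.Args[1], func(chunk []byte) { os.Stdout.Write(chunk) }); err != nil {
		log.Fatal(err)
	}
}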