// run delegates the files to its workers.
func (s *NagiosSpoolfileCollector) run() {
	promServer := statistics.GetPrometheusServer()
	for {
		select {
		case <-s.quit:
			s.quit <- true
			return
		case <-time.After(IntervalToCheckDirectory):
			pause := config.PauseNagflux.Load().(bool)
			if pause {
				logging.GetLogger().Debugln("NagiosSpoolfileCollector in pause")
				continue
			}

			logging.GetLogger().Debug("Reading Directory: ", s.spoolDirectory)
			files, err := ioutil.ReadDir(s.spoolDirectory)
			if err != nil {
				logging.GetLogger().Warn("NagiosSpoolfileCollector: Reading directory error: ", err)
				continue
			}
			promServer.SpoolFilesOnDisk.Set(float64(len(files)))
			for _, currentFile := range files {
				select {
				case <-s.quit:
					s.quit <- true
					return
				case s.jobs <- path.Join(s.spoolDirectory, currentFile.Name()):
				case <-time.After(time.Minute):
					logging.GetLogger().Warn("NagiosSpoolfileCollector: Could not write to buffer")
				}
			}
		}
	}
}
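
// The select above uses a quit-channel handshake: on shutdown the goroutine
// echoes true back into s.quit, so the caller can block until the loop has
// really exited. A minimal, self-contained sketch of that pattern (the
// stoppable type below is illustrative, not part of nagflux):
package main

import (
	"fmt"
	"time"
)

type stoppable struct {
	quit chan bool
}

func (s *stoppable) run() {
	for {
		select {
		case <-s.quit:
			s.quit <- true // echo the signal so the stopper knows we are done
			return
		case <-time.After(100 * time.Millisecond):
			fmt.Println("working...")
		}
	}
}

func main() {
	s := &stoppable{quit: make(chan bool)}
	go s.run()
	time.Sleep(250 * time.Millisecond)
	s.quit <- true // request shutdown
	<-s.quit       // wait for the echo: run() has returned
	fmt.Println("stopped")
}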

// run waits for files to parse and sends the data to the main queue.
func (w *NagiosSpoolfileWorker) run() {
	promServer := statistics.GetPrometheusServer()
	var file string
	for {
		select {
		case <-w.quit:
			w.quit <- true
			return
		case file = <-w.jobs:
			promServer.SpoolFilesInQueue.Set(float64(len(w.jobs)))
			startTime := time.Now()
			logging.GetLogger().Debug("Reading file: ", file)
			filehandle, err := os.Open(file)
			if err != nil {
				logging.GetLogger().Warn("NagiosSpoolfileWorker: Opening file error: ", err)
				break
			}
			reader := bufio.NewReaderSize(filehandle, 4096*10)
			queries := 0
			line, isPrefix, err := reader.ReadLine()
			for err == nil && !isPrefix {
				splittedPerformanceData := helper.StringToMap(string(line), "\t", "::")
				for singlePerfdata := range w.PerformanceDataIterator(splittedPerformanceData) {
					for _, r := range w.results {
						select {
						case <-w.quit:
							w.quit <- true
							return
						case r <- singlePerfdata:
							queries++
						case <-time.After(time.Minute):
							logging.GetLogger().Warn("NagiosSpoolfileWorker: Could not write to buffer")
						}
					}
				}
				line, isPrefix, err = reader.ReadLine()
			}
			if err != nil && err != io.EOF {
				logging.GetLogger().Warn(err)
			}
			if isPrefix {
				logging.GetLogger().Warn("NagiosSpoolfileWorker: filebuffer is too small")
			}
			filehandle.Close()
			err = os.Remove(file)
			if err != nil {
				logging.GetLogger().Warn(err)
			}
			promServer.SpoolFilesParsedDuration.Add(float64(time.Since(startTime).Milliseconds()))
			promServer.SpoolFilesLines.Add(float64(queries))
		case <-time.After(5 * time.Minute):
			logging.GetLogger().Debug("NagiosSpoolfileWorker: Got nothing to do")
		}
	}
}
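
// Each spoolfile line is a tab-separated list of KEY::VALUE pairs, which
// w.run() splits via helper.StringToMap(line, "\t", "::"). A hedged sketch of
// that assumed split (stringToMap and the sample keys below are illustrative;
// the real helper may differ in edge cases):
package main

import (
	"fmt"
	"strings"
)

func stringToMap(input, entrySep, kvSep string) map[string]string {
	result := map[string]string{}
	for _, entry := range strings.Split(input, entrySep) {
		pair := strings.SplitN(entry, kvSep, 2)
		if len(pair) == 2 {
			result[pair[0]] = pair[1]
		}
	}
	return result
}

func main() {
	line := "DATATYPE::SERVICEPERFDATA\tTIMET::1490000000\tHOSTNAME::web01\tSERVICEDESC::ping\tSERVICEPERFDATA::rta=0.5ms;;;;"
	for key, value := range stringToMap(line, "\t", "::") {
		fmt.Printf("%s = %s\n", key, value)
	}
}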
// WorkerGenerator generates a new Worker and starts it.
func WorkerGenerator(jobs chan collector.Printable, connection, index, dumpFile, version string, connector *Connector) func(workerId int) *Worker {
	return func(workerId int) *Worker {
		worker := &Worker{
			workerId, make(chan bool),
			make(chan bool, 1), jobs,
			connection, dumpFile,
			logging.GetLogger(), version,
			connector, http.Client{}, true, index,
			statistics.GetPrometheusServer()}
		go worker.run()
		return worker
	}
}
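
// WorkerGenerator is a factory closure: the shared dependencies (jobs channel,
// connection data, connector) are captured once, and a pool spawner only has
// to supply the workerId. A minimal, self-contained sketch of that pattern
// with stand-in types (worker and workerGenerator below are illustrative):
package main

import "fmt"

type worker struct{ id int }

func workerGenerator(connection, version string) func(workerId int) *worker {
	return func(workerId int) *worker {
		fmt.Printf("starting worker %d -> %s (version %s)\n", workerId, connection, version)
		return &worker{id: workerId}
	}
}

func main() {
	gen := workerGenerator("http://localhost:8086", "1.0")
	for i := 0; i < 3; i++ {
		gen(i)
	}
}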
// WorkerGenerator generates a new Worker and starts it. This variant builds
// its own HTTP client with a request timeout and skipped TLS certificate
// verification.
func WorkerGenerator(jobs chan collector.Printable, connection, dumpFile, version string, connector *Connector, datatype data.Datatype) func(workerId int) *Worker {
	return func(workerId int) *Worker {
		timeout := 5 * time.Second
		transport := &http.Transport{
			TLSClientConfig: &tls.Config{
				InsecureSkipVerify: true, // accept self-signed certificates; no verification
			},
		}
		client := http.Client{Timeout: timeout, Transport: transport}
		worker := &Worker{
			workerId, make(chan bool),
			make(chan bool, 1), jobs,
			connection, nagflux.GenDumpfileName(dumpFile, datatype),
			logging.GetLogger(), version,
			connector, client, true, datatype, statistics.GetPrometheusServer()}
		go worker.run()
		return worker
	}
}
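
// The variant above wires its own http.Client: a hard five-second timeout so a
// slow target cannot block a worker, and InsecureSkipVerify to accept
// self-signed certificates at the cost of man-in-the-middle protection. A
// self-contained sketch of that client setup (the /ping URL is a hypothetical
// target, not taken from the code above):
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

func newInsecureClient() *http.Client {
	return &http.Client{
		Timeout: 5 * time.Second, // abort slow requests instead of blocking forever
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // skip certificate verification
		},
	}
}

func main() {
	client := newInsecureClient()
	resp, err := client.Get("https://localhost:8086/ping")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}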