//NewRuleFileParser creates a new RuleFileParser, returns an error if the rule file can not be parsed
func NewRuleFileParser(ruleFile string) (*RuleFileParser, error) {
	fileReader, err := os.Open(ruleFile)
	if err != nil {
		return nil, err
	}
	defer fileReader.Close()
	r := csv.NewReader(fileReader)
	r.TrimLeadingSpace = true
	r.Comment = '#'
	r.Comma = ';'
	records, err := r.ReadAll()
	if err != nil {
		return nil, err
	}
	lines := []RuleLine{}
	for _, elements := range records {
		command, args, err := parseCommand(elements[2])
		if err != nil {
			return nil, err
		}
		if len(elements[0]) == 0 && len(elements[1]) == 0 {
			//An empty name and condition reuses both from the previous rule.
			if len(lines) == 0 {
				return nil, fmt.Errorf("the first rule can not reference a previous rule")
			}
			lastRule := lines[len(lines)-1]
			lines = append(lines, RuleLine{
				name:      lastRule.name,
				condition: lastRule.condition,
				command:   command,
				args:      args,
				flags:     helper.StringToMap(elements[3], ",", "="),
			})
		} else {
			lines = append(lines, RuleLine{
				name:      elements[0],
				condition: elements[1],
				command:   command,
				args:      args,
				flags:     helper.StringToMap(elements[3], ",", "="),
			})
		}
	}
	client, err := Logging.NewClientOwnName(Config.GetClientConfig().LogServer.RPCInterface, "RuleSystem")
	if err != nil {
		return nil, err
	}
	return &RuleFileParser{ruleFile: ruleFile, lines: lines, externalModule: *Module.NewExternalModule(), LogClient: client}, nil
}
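// Below is a small, self-contained sketch of the CSV configuration that
// NewRuleFileParser relies on: ';' separates fields, '#' starts a comment,
// leading spaces are trimmed, and a record with an empty name and condition
// reuses both from the previous rule. The sample rules are hypothetical,
// inferred from the parsing code rather than taken from project docs.
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	ruleFile := "# name;condition;command;flags\n" +
		"highload; load > 5; notify(admin); retry=3,delay=10\n" +
		";; log(overload); severity=warn\n"
	r := csv.NewReader(strings.NewReader(ruleFile))
	r.TrimLeadingSpace = true
	r.Comment = '#'
	r.Comma = ';'
	records, err := r.ReadAll()
	if err != nil {
		panic(err)
	}
	for _, record := range records {
		fmt.Printf("%q\n", record) // comment line is skipped, empty fields stay empty
	}
}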
//run waits for spoolfiles to parse and sends the generated performance data to the result queues.
func (w *NagiosSpoolfileWorker) run() {
	promServer := statistics.GetPrometheusServer()
	var file string
	for {
		select {
		case <-w.quit:
			w.quit <- true
			return
		case file = <-w.jobs:
			promServer.SpoolFilesInQueue.Set(float64(len(w.jobs)))
			startTime := time.Now()
			logging.GetLogger().Debug("Reading file: ", file)
			filehandle, err := os.OpenFile(file, os.O_RDONLY, os.ModePerm)
			if err != nil {
				logging.GetLogger().Warn("NagiosSpoolfileWorker: Opening file error: ", err)
				break
			}
			reader := bufio.NewReaderSize(filehandle, 4096*10)
			queries := 0
			line, isPrefix, err := reader.ReadLine()
			for err == nil && !isPrefix {
				splittedPerformanceData := helper.StringToMap(string(line), "\t", "::")
				for singlePerfdata := range w.PerformanceDataIterator(splittedPerformanceData) {
					for _, r := range w.results {
						select {
						case <-w.quit:
							w.quit <- true
							return
						case r <- singlePerfdata:
							queries++
						case <-time.After(time.Duration(1) * time.Minute):
							logging.GetLogger().Warn("NagiosSpoolfileWorker: Could not write to buffer")
						}
					}
				}
				line, isPrefix, err = reader.ReadLine()
			}
			if err != nil && err != io.EOF {
				logging.GetLogger().Warn(err)
			}
			if isPrefix {
				logging.GetLogger().Warn("NagiosSpoolfileWorker: filebuffer is too small")
			}
			filehandle.Close()
			err = os.Remove(file)
			if err != nil {
				logging.GetLogger().Warn(err)
			}
			//Duration is recorded in milliseconds.
			promServer.SpoolFilesParsedDuration.Add(float64(time.Since(startTime).Nanoseconds() / 1000000))
			promServer.SpoolFilesLines.Add(float64(queries))
		case <-time.After(time.Duration(5) * time.Minute):
			logging.GetLogger().Debug("NagiosSpoolfileWorker: Got nothing to do")
		}
	}
}
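// The quit channel above carries a single stop token that each goroutine
// receives and immediately re-sends, so every worker selecting on it gets
// its turn to shut down. A minimal, self-contained sketch of that pattern
// (worker names are illustrative, not project code):
package main

import (
	"fmt"
	"time"
)

func worker(id int, quit chan bool) {
	for {
		select {
		case <-quit:
			quit <- true // pass the stop token on to the next worker
			fmt.Println("worker", id, "stopped")
			return
		case <-time.After(10 * time.Millisecond):
			// normal work would happen here
		}
	}
}

func main() {
	quit := make(chan bool, 1)
	for i := 0; i < 3; i++ {
		go worker(i, quit)
	}
	time.Sleep(50 * time.Millisecond)
	quit <- true
	time.Sleep(50 * time.Millisecond) // let the workers drain the token first
	<-quit                            // finally take the token back out
}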
//run waits for spoolfiles to parse and sends the generated performance data to the result queue.
func (w *NagiosSpoolfileWorker) run() {
	var file string
	for {
		select {
		case <-w.quit:
			w.quit <- true
			return
		case file = <-w.jobs:
			startTime := time.Now()
			data, err := ioutil.ReadFile(file)
			if err != nil {
				logging.GetLogger().Warn("NagiosSpoolfileWorker: Reading file error: ", err)
				break
			}
			lines := strings.SplitAfter(string(data), "\n")
			queries := 0
			for _, line := range lines {
				splittedPerformanceData := helper.StringToMap(line, "\t", "::")
				for singlePerfdata := range w.performanceDataIterator(splittedPerformanceData) {
					select {
					case <-w.quit:
						w.quit <- true
						return
					case w.results <- singlePerfdata:
						queries++
					case <-time.After(time.Duration(1) * time.Minute):
						logging.GetLogger().Warn("NagiosSpoolfileWorker: Could not write to buffer")
					}
				}
			}
			err = os.Remove(file)
			if err != nil {
				logging.GetLogger().Warn(err)
			}
			w.statistics.ReceiveQueries("read/parsed", statistics.QueriesPerTime{queries, time.Since(startTime)})
		case <-time.After(time.Duration(5) * time.Minute):
			logging.GetLogger().Debug("NagiosSpoolfileWorker: Got nothing to do")
		}
	}
}
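// helper.StringToMap is project code; the stand-in below is a hypothetical
// minimal version showing what both run loops expect from a spoolfile line:
// tab-separated KEY::VALUE pairs, following the usual Nagios perfdata
// template macros.
package main

import (
	"fmt"
	"strings"
)

func stringToMap(input, entrySeparator, keyValueSeparator string) map[string]string {
	result := map[string]string{}
	for _, entry := range strings.Split(input, entrySeparator) {
		pair := strings.SplitN(entry, keyValueSeparator, 2)
		if len(pair) == 2 {
			result[pair[0]] = pair[1]
		}
	}
	return result
}

func main() {
	line := "DATATYPE::SERVICEPERFDATA\tTIMET::1489572014\tHOSTNAME::web01\t" +
		"SERVICEDESC::ping\tSERVICEPERFDATA::rta=0.5ms;100;200;0\tSERVICECHECKCOMMAND::check_ping"
	fmt.Println(stringToMap(line, "\t", "::"))
}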
//handelJob decrypts the job payload if a decrypter is configured, parses the performance data, and distributes it to the result queues.
func (g GearmanWorker) handelJob(job worker.Job) ([]byte, error) {
	secret := job.Data()
	if g.aesECBDecrypter != nil {
		var err error
		secret, err = g.aesECBDecrypter.Decypt(secret)
		if err != nil {
			g.log.Warn(err, ". Data: ", string(job.Data()))
			return job.Data(), nil
		}
	}
	splittedPerformanceData := helper.StringToMap(string(secret), "\t", "::")
	g.log.Debug("[ModGearman] ", string(job.Data()))
	g.log.Debug("[ModGearman] ", splittedPerformanceData)
	for singlePerfdata := range g.nagiosSpoolfileWorker.PerformanceDataIterator(splittedPerformanceData) {
		for _, r := range g.results {
			select {
			case r <- singlePerfdata:
			case <-time.After(time.Duration(1) * time.Minute):
				logging.GetLogger().Warn("GearmanWorker: Could not write to buffer")
			}
		}
	}
	return job.Data(), nil
}
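// The aesECBDecrypter used above is project code that is not shown here. As
// Go's standard library offers no ECB mode, a sketch of AES-ECB decryption
// has to walk the ciphertext block by block; the round trip below is a
// hypothetical demo and omits the key derivation and padding rules of the
// real mod_gearman payload format.
package main

import (
	"crypto/aes"
	"fmt"
)

func decryptECB(key, data []byte) ([]byte, error) {
	block, err := aes.NewCipher(key) // key must be 16, 24, or 32 bytes long
	if err != nil {
		return nil, err
	}
	if len(data)%block.BlockSize() != 0 {
		return nil, fmt.Errorf("data is not a multiple of the block size")
	}
	out := make([]byte, len(data))
	for i := 0; i < len(data); i += block.BlockSize() {
		block.Decrypt(out[i:i+block.BlockSize()], data[i:i+block.BlockSize()])
	}
	return out, nil
}

func main() {
	key := []byte("0123456789abcdef") // 16 bytes -> AES-128
	plain := []byte("exactly 16 bytes")
	block, _ := aes.NewCipher(key)
	encrypted := make([]byte, len(plain))
	block.Encrypt(encrypted, plain)
	decrypted, err := decryptECB(key, encrypted)
	fmt.Println(string(decrypted), err)
}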
//PerformanceDataIterator returns an iterator to loop over generated perf data.
func (w *NagiosSpoolfileWorker) PerformanceDataIterator(input map[string]string) <-chan PerformanceData {
	ch := make(chan PerformanceData)
	typ := findType(input)
	if typ == "" {
		if len(input) > 1 {
			logging.GetLogger().Info("Line does not match the scheme", input)
		}
		close(ch)
		return ch
	}

	currentCommand := w.searchAltCommand(input[typ+"PERFDATA"], input[typ+checkcommand])
	currentTime := helper.CastStringTimeFromSToMs(input[timet])
	currentService := ""
	if typ != hostType {
		currentService = input[servicedesc]
	}

	go func() {
		perfSlice := regexPerformancelable.FindAllStringSubmatch(input[typ+"PERFDATA"], -1)
		currentCheckMultiLabel := ""
		//Try to find a check_multi prefix.
		if len(perfSlice) > 0 && len(perfSlice[0]) > 1 {
			currentCheckMultiLabel = getCheckMultiRegexMatch(perfSlice[0][1])
		}
	item:
		for _, value := range perfSlice {
			//Allows adding tags and fields to spoolfile entries.
			tag := map[string]string{}
			if tagString, ok := input[nagfluxTags]; ok {
				tag = helper.StringToMap(tagString, " ", "=")
			}
			field := map[string]string{}
			if tagString, ok := input[nagfluxField]; ok {
				field = helper.StringToMap(tagString, " ", "=")
			}
			perf := PerformanceData{
				hostname:         input[hostname],
				service:          currentService,
				command:          currentCommand,
				time:             currentTime,
				performanceLabel: value[1],
				unit:             value[3],
				tags:             tag,
				fields:           field,
			}
			if currentCheckMultiLabel != "" {
				//A check_multi prefix was found last time; test if the current label also has one.
				if potentialNextOne := getCheckMultiRegexMatch(perf.performanceLabel); potentialNextOne == "" {
					//If not, put the last one in front of the current label.
					perf.performanceLabel = currentCheckMultiLabel + perf.performanceLabel
				} else {
					//Else remember the current prefix for the next one.
					currentCheckMultiLabel = potentialNextOne
				}
			}
			for i, data := range value {
				if i > 1 && i != 3 && data != "" {
					performanceType, err := indexToperformanceType(i)
					if err != nil {
						logging.GetLogger().Warn(err, value)
						continue
					}
					//Add downtime tag if needed.
					if performanceType == "value" && w.livestatusCacheBuilder.IsServiceInDowntime(perf.hostname, perf.service, input[timet]) {
						perf.tags["downtime"] = "true"
					}
					if performanceType == "warn" || performanceType == "crit" {
						//Range handling.
						fillLabel := performanceType + "-fill"
						rangeHits := rangeRegex.FindAllStringSubmatch(data, -1)
						if len(rangeHits) == 1 {
							perf.tags[fillLabel] = "none"
							perf.fields[performanceType] = helper.StringIntToStringFloat(rangeHits[0][0])
						} else if len(rangeHits) == 2 {
							//If there is a range with no infinity as border, create two points.
							if strings.Contains(data, "@") {
								perf.tags[fillLabel] = "inner"
							} else {
								perf.tags[fillLabel] = "outer"
							}
							for i, tag := range []string{"min", "max"} {
								tagKey := fmt.Sprintf("%s-%s", performanceType, tag)
								perf.fields[tagKey] = helper.StringIntToStringFloat(rangeHits[i][0])
							}
						} else {
							logging.GetLogger().Warn("Regex matching went wrong", rangeHits, data, value)
						}
					} else {
						if !helper.IsStringANumber(data) {
							continue item
						}
						perf.fields[performanceType] = helper.StringIntToStringFloat(data)
					}
				}
			}
			ch <- perf
		}
		close(ch)
	}()
	return ch
}
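// regexPerformancelable is defined elsewhere in the package. The simplified
// stand-in below (hypothetical, less permissive than the real pattern)
// shows the submatch layout the iterator relies on: index 1 is the label,
// 2 the value, 3 the unit, 4 warn, 5 crit, 6 min, and 7 max.
package main

import (
	"fmt"
	"regexp"
)

var simplePerfRegex = regexp.MustCompile(
	`([^=]+)=(U|[\d\.\-]+)([\pL\/%]*);?([\d\.\-:~@]+)?;?([\d\.\-:~@]+)?;?([\d\.\-]+)?;?([\d\.\-]+)?;?\s*`)

func main() {
	perfdata := "rta=0.5ms;100;200;0; pl=0%;20;60;;"
	for _, match := range simplePerfRegex.FindAllStringSubmatch(perfdata, -1) {
		fmt.Printf("label=%q value=%q unit=%q warn=%q crit=%q min=%q max=%q\n",
			match[1], match[2], match[3], match[4], match[5], match[6], match[7])
	}
}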