Example 1
//SanitizeInfluxInput escapes special characters with backslashes so the input can be used in the InfluxDB line protocol.
func SanitizeInfluxInput(input string) string {
	if len(input) == 0 {
		return input
	}
	if string(input[0]) == `"` && string(input[len(input)-1]) == `"` {
		return input
	}
	if config.GetConfig().Influx.NastyString != "" {
		input = strings.Replace(input, config.GetConfig().Influx.NastyString, config.GetConfig().Influx.NastyStringToReplace, -1)
	}
	input = strings.Trim(input, `'`)
	input = strings.Replace(input, " ", `\ `, -1)
	input = strings.Replace(input, ",", `\,`, -1)

	return input
}
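
As a rough usage sketch (assumed, not part of nagflux): leaving aside the quoted-string short circuit and the config-driven NastyString replacement, the escaping amounts to trimming single quotes and backslash-escaping the spaces and commas that would otherwise break the line protocol. The helper name below is made up for illustration.

package main

import (
	"fmt"
	"strings"
)

// escapeTagValue is a minimal, standalone stand-in for SanitizeInfluxInput
// without the config lookup: trim single quotes, escape spaces and commas.
func escapeTagValue(input string) string {
	if len(input) == 0 {
		return input
	}
	input = strings.Trim(input, `'`)
	input = strings.Replace(input, " ", `\ `, -1)
	input = strings.Replace(input, ",", `\,`, -1)
	return input
}

func main() {
	fmt.Println(escapeTagValue("check_http -H example.com"))
	// prints: check_http\ -H\ example.com
}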
Example 2
//PrintForInfluxDB prints the data in the InfluxDB line-protocol format
func (p PerformanceData) PrintForInfluxDB(version string) string {
	if helper.VersionOrdinal(version) >= helper.VersionOrdinal("0.9") {
		tableName := fmt.Sprintf(`metrics,host=%s`, helper.SanitizeInfluxInput(p.hostname))
		if p.service == "" {
			tableName += fmt.Sprintf(`,service=%s`, helper.SanitizeInfluxInput(config.GetConfig().Influx.HostcheckAlias))
		} else {
			tableName += fmt.Sprintf(`,service=%s`, helper.SanitizeInfluxInput(p.service))
		}
		tableName += fmt.Sprintf(`,command=%s,performanceLabel=%s`,
			helper.SanitizeInfluxInput(p.command),
			helper.SanitizeInfluxInput(p.performanceLabel),
		)
		if len(p.tags) > 0 {
			tableName += fmt.Sprintf(`,%s`, helper.PrintMapAsString(helper.SanitizeMap(p.tags), ",", "="))
		}
		if p.unit != "" {
			tableName += fmt.Sprintf(`,unit=%s`, p.unit)
		}

		tableName += fmt.Sprintf(` %s`, helper.PrintMapAsString(helper.SanitizeMap(p.fields), ",", "="))
		tableName += fmt.Sprintf(" %s\n", p.time)
		return tableName
	}
	return ""
}
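
For orientation, a hand-built line in the same shape this method emits (a sketch with made-up values, not actual nagflux output):

package main

import "fmt"

func main() {
	// measurement,tag=value,... field=value,... timestamp
	line := fmt.Sprintf("metrics,host=%s,service=%s,command=%s,performanceLabel=%s %s %s\n",
		"db01", "ping", "check_ping", "rta", "value=0.25", "1490000000000")
	fmt.Print(line)
	// prints: metrics,host=db01,service=ping,command=check_ping,performanceLabel=rta value=0.25 1490000000000
}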
Example 3
//createTemplate creates the nagflux template.
func (connector *Connector) createTemplate() bool {
	mapping := fmt.Sprintf(NagfluxTemplate, connector.index, config.GetConfig().Elasticsearch.NumberOfShards, config.GetConfig().Elasticsearch.NumberOfReplicas)
	createIndex, _ := helper.SentReturnCodeIsOK(connector.httpClient, connector.connectionHost+"_template/"+connector.index, "PUT", mapping)
	return createIndex
}
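
The HTTP call itself is hidden behind the helper; below is a minimal sketch of what such a template upload might look like with only the standard library (URL, body, and success criterion are assumptions, not the project's actual SentReturnCodeIsOK implementation):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// putTemplate PUTs a mapping body to the given URL and treats any 2xx status as success.
func putTemplate(client *http.Client, url, body string) (bool, error) {
	req, err := http.NewRequest(http.MethodPut, url, strings.NewReader(body))
	if err != nil {
		return false, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := client.Do(req)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode >= 200 && resp.StatusCode < 300, nil
}

func main() {
	ok, err := putTemplate(http.DefaultClient, "http://localhost:9200/_template/nagflux", `{"template":"nagflux-*"}`)
	fmt.Println(ok, err)
}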
Example 4
//genElasticLineWithValue builds an Elasticsearch bulk-API pair (action line plus document) for a single message with the given value.
func (live Data) genElasticLineWithValue(index, typ, value, timestamp string) string {
	value = strings.Replace(value, `"`, `\"`, -1)
	if live.serviceDisplayName == "" {
		live.serviceDisplayName = config.GetConfig().Elasticsearch.HostcheckAlias
	}
	head := fmt.Sprintf(`{"index":{"_index":"%s","_type":"messages"}}`, helper.GenIndex(index, timestamp)) + "\n"
	data := fmt.Sprintf(`{"timestamp":%s,"message":"%s","author":"%s","host":"%s","service":"%s","type":"%s"}`+"\n",
		helper.CastStringTimeFromSToMs(timestamp), value, live.author, live.hostName, live.serviceDisplayName, typ,
	)
	return head + data
}
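
The return value is one entry for the Elasticsearch bulk API: an action line followed by the document source, each newline-terminated. A hypothetical pair (all values made up) looks like this:

package main

import "fmt"

func main() {
	head := `{"index":{"_index":"nagflux-2017.03","_type":"messages"}}` + "\n"
	data := `{"timestamp":1490000000000,"message":"scheduled downtime","author":"admin","host":"db01","service":"ping","type":"comment"}` + "\n"
	fmt.Print(head + data)
}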
Example 5
//GenIndex generates an index name based on the configured rotation, suffixed with the year and, for monthly rotation, the month
func GenIndex(index, timeString string) string {
	rotation := config.GetConfig().Elasticsearch.IndexRotation
	year, month := GetYearMonthFromStringTimeMs(timeString)
	switch rotation {
	case "monthly":
		return fmt.Sprintf("%s-%d.%02d", index, year, month)
	case "yearly":
		return fmt.Sprintf("%s-%d", index, year)
	default:
		panic(fmt.Sprintf("The given IndexRotation[%s] is not supported", rotation))
	}
}
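
The two supported rotations yield names like the following (index name and date are made up):

package main

import "fmt"

func main() {
	// monthly: index suffixed with year and zero-padded month
	fmt.Printf("%s-%d.%02d\n", "nagflux", 2017, 3) // nagflux-2017.03
	// yearly: index suffixed with the year only
	fmt.Printf("%s-%d\n", "nagflux", 2017) // nagflux-2017
}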
Example 6
func getLivestatusVersion(live *Collector) int {
	printables := make(chan collector.Printable, 1)
	finished := make(chan bool, 1)
	var version string
	live.requestPrintablesFromLivestatus(QueryLivestatusVersion, false, printables, finished)
	i := 0
	oneMinute := time.Duration(1) * time.Minute
	roundsToWait := config.GetConfig().Livestatus.MinutesToWait
Loop:
	for roundsToWait != 0 {
		select {
		case versionPrintable := <-printables:
			version = versionPrintable.PrintForInfluxDB("0")
			break Loop
		case <-time.After(oneMinute):
			if i < roundsToWait {
				go live.requestPrintablesFromLivestatus(QueryLivestatusVersion, false, printables, finished)
			} else {
				break Loop
			}
			i++
		case fin := <-finished:
			if !fin {
				live.log.Infof(
					"Could not detect livestatus version, waiting for %s %d times( %d/%d )...",
					oneMinute, roundsToWait, i, roundsToWait,
				)
			}
		}
	}

	live.log.Info("Livestatus version: ", version)
	if icinga2, _ := regexp.MatchString(`^r[\d\.-]+$`, version); icinga2 {
		return Icinga2
	} else if nagios, _ := regexp.MatchString(`^[\d\.]+p[\d\.]+$`, version); nagios {
		return Nagios
	} else if naemon, _ := regexp.MatchString(`^[\d\.]+-naemon$`, version); naemon {
		return Naemon
	}
	live.log.Warn("Could not detect livestatus type with version: ", version, ". Assuming Nagios")
	return -1
}
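
The classification rests entirely on the three regular expressions; here is a standalone sketch that runs the same patterns against made-up version strings:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	icinga2 := regexp.MustCompile(`^r[\d\.-]+$`)
	nagios := regexp.MustCompile(`^[\d\.]+p[\d\.]+$`)
	naemon := regexp.MustCompile(`^[\d\.]+-naemon$`)

	for _, v := range []string{"r2.6.3-1", "4.3.1p1", "1.0.5-naemon", "unknown"} {
		fmt.Printf("%-14s icinga2=%t nagios=%t naemon=%t\n",
			v, icinga2.MatchString(v), nagios.MatchString(v), naemon.MatchString(v))
	}
}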
Example 7
//handleLoad throttles the gearman worker: while any result queue holds more than ~90% of the configured buffer size, the worker is locked until the queue drains.
func (g GearmanWorker) handleLoad() {
	bufferLimit := int(float32(config.GetConfig().Main.BufferSize) * 0.90)
	for {
		for _, r := range g.results {
			if len(r) > bufferLimit && g.worker != nil {
				g.worker.Lock()
				for len(r) > bufferLimit {
					time.Sleep(time.Duration(100) * time.Millisecond)
				}
				g.worker.Unlock()
			}
		}
		select {
		case <-g.quit:
			g.quit <- true
			return
		case <-time.After(time.Duration(1) * time.Second):
		}
	}
}
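
The pattern here is a simple high-water-mark check: once a result queue is about 90% full, the worker is locked so it stops accepting gearman jobs until consumers have drained the queue. A self-contained sketch of the same idea (channel, sizes, and timing are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	results := make(chan string, 100)
	bufferLimit := int(float32(cap(results)) * 0.90)

	// Simulate a nearly full queue.
	for i := 0; i < 95; i++ {
		results <- "job"
	}

	// Drain it slowly in the background.
	go func() {
		for range results {
			time.Sleep(time.Millisecond)
		}
	}()

	// The handleLoad role: wait until the queue is back below the limit.
	for len(results) > bufferLimit {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Println("queue below", bufferLimit, "entries again; resume accepting jobs")
}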
Example 8
//PrintForElasticsearch prints the data in the Elasticsearch bulk JSON format
func (p PerformanceData) PrintForElasticsearch(version, index string) string {
	if helper.VersionOrdinal(version) >= helper.VersionOrdinal("2.0") {
		if p.service == "" {
			p.service = config.GetConfig().Influx.HostcheckAlias
		}
		head := fmt.Sprintf(`{"index":{"_index":"%s","_type":"metrics"}}`, helper.GenIndex(index, p.time)) + "\n"
		data := fmt.Sprintf(
			`{"timestamp":%s,"host":"%s","service":"%s","command":"%s","performanceLabel":"%s"`,
			p.time,
			helper.SanitizeElasicInput(p.hostname),
			helper.SanitizeElasicInput(p.service),
			helper.SanitizeElasicInput(p.command),
			helper.SanitizeElasicInput(p.performanceLabel),
		)
		if p.unit != "" {
			data += fmt.Sprintf(`,"unit":"%s"`, helper.SanitizeElasicInput(p.unit))
		}
		data += helper.CreateJSONFromStringMap(p.tags)
		data += helper.CreateJSONFromStringMap(p.fields)
		data += "}\n"
		return head + data
	}
	return ""
}
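
Analogous to the message case above, each metric becomes a bulk action line plus a document; a hypothetical pair (values made up, and the tag/field key-value pairs that CreateJSONFromStringMap appends are left out):

package main

import "fmt"

func main() {
	head := `{"index":{"_index":"nagflux-2017.03","_type":"metrics"}}` + "\n"
	data := `{"timestamp":1490000000000,"host":"db01","service":"ping","command":"check_ping","performanceLabel":"rta","unit":"ms"}` + "\n"
	fmt.Print(head + data)
}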
Example 9
//getTablename generates the InfluxDB table name (measurement plus host and service tags).
func (live Data) getTablename() string {
	if live.serviceDisplayName == "" {
		live.serviceDisplayName = config.GetConfig().Influx.HostcheckAlias
	}
	return fmt.Sprintf("messages,host=%s,service=%s", live.hostName, live.serviceDisplayName)
}
Example 10
func main() {
	//Parse Args
	var configPath string
	var printver bool
	flag.Usage = func() {
		fmt.Println(`Nagflux by Philip Griesbacher`, nagfluxVersion, `
Command line parameters:
-configPath Path to the config file. If no file path is given, the default is ./config.gcfg.
-V Print version and exit`)
	}
	flag.StringVar(&configPath, "configPath", "config.gcfg", "path to the config file")
	flag.BoolVar(&printver, "V", false, "print version and exit")
	flag.Parse()

	//Print version and exit
	if printver {
		fmt.Println(nagfluxVersion)
		os.Exit(0)
	}

	//Load config
	config.InitConfig(configPath)
	cfg := config.GetConfig()

	//Create Logger
	logging.InitLogger(cfg.Log.LogFile, cfg.Log.MinSeverity)
	log = logging.GetLogger()
	log.Info(`Started Nagflux `, nagfluxVersion)
	resultQueues := map[data.Datatype]chan collector.Printable{}
	stoppables := []Stoppable{}
	if len(cfg.Main.FieldSeparator) < 1 {
		panic("FieldSeparator is too short!")
	}
	pro := statistics.NewPrometheusServer(cfg.Monitoring.PrometheusAddress)
	pro.WatchResultQueueLength(resultQueues)
	fieldSeparator := []rune(cfg.Main.FieldSeparator)[0]

	config.PauseNagflux.Store(false)

	if cfg.Influx.Enabled {
		resultQueues[data.InfluxDB] = make(chan collector.Printable, cfg.Main.BufferSize)
		influx := influx.ConnectorFactory(resultQueues[data.InfluxDB], cfg.Influx.Address, cfg.Influx.Arguments, cfg.Main.DumpFile, cfg.Influx.Version, cfg.Main.InfluxWorker, cfg.Main.MaxInfluxWorker, cfg.Influx.CreateDatabaseIfNotExists)
		stoppables = append(stoppables, influx)
		influxDumpFileCollector := nagflux.NewDumpfileCollector(resultQueues[data.InfluxDB], cfg.Main.DumpFile, data.InfluxDB)
		stoppables = append(stoppables, influxDumpFileCollector)
	}

	if cfg.Elasticsearch.Enabled {
		resultQueues[data.Elasticsearch] = make(chan collector.Printable, cfg.Main.BufferSize)
		elasticsearch := elasticsearch.ConnectorFactory(resultQueues[data.Elasticsearch], cfg.Elasticsearch.Address, cfg.Elasticsearch.Index, cfg.Main.DumpFile, cfg.Elasticsearch.Version, cfg.Main.InfluxWorker, cfg.Main.MaxInfluxWorker, true)
		stoppables = append(stoppables, elasticsearch)
		elasticDumpFileCollector := nagflux.NewDumpfileCollector(resultQueues[data.Elasticsearch], cfg.Main.DumpFile, data.Elasticsearch)
		stoppables = append(stoppables, elasticDumpFileCollector)
	}

	//Give the dumpfile collectors some time to fill the queues
	time.Sleep(time.Duration(100) * time.Millisecond)

	liveconnector := &livestatus.Connector{log, cfg.Livestatus.Address, cfg.Livestatus.Type}
	livestatusCollector := livestatus.NewLivestatusCollector(resultQueues, liveconnector, true)
	livestatusCache := livestatus.NewLivestatusCacheBuilder(liveconnector)

	for name, data := range cfg.ModGearman {
		if data == nil || !(*data).Enabled {
			continue
		}
		log.Infof("Mod_Gearman: %s - %s [%s]", name, (*data).Address, (*data).Queue)
		secret := modGearman.GetSecret((*data).Secret, (*data).SecretFile)
		for i := 0; i < (*data).Worker; i++ {
			gearmanWorker := modGearman.NewGearmanWorker((*data).Address,
				(*data).Queue,
				secret,
				resultQueues,
				livestatusCache,
			)
			stoppables = append(stoppables, gearmanWorker)
		}
	}

	log.Info("Nagios Spoolfile Folder: ", cfg.Main.NagiosSpoolfileFolder)
	nagiosCollector := spoolfile.NagiosSpoolfileCollectorFactory(cfg.Main.NagiosSpoolfileFolder, cfg.Main.NagiosSpoolfileWorker, resultQueues, livestatusCache)

	log.Info("Nagflux Spoolfile Folder: ", cfg.Main.NagfluxSpoolfileFolder)
	nagfluxCollector := nagflux.NewNagfluxFileCollector(resultQueues, cfg.Main.NagfluxSpoolfileFolder, fieldSeparator)

	//Listen for Interrupts
	interruptChannel := make(chan os.Signal, 1)
	signal.Notify(interruptChannel, syscall.SIGINT)
	signal.Notify(interruptChannel, syscall.SIGTERM)
	go func() {
		<-interruptChannel
		log.Warn("Got Interrupted")
		stoppables = append(stoppables, []Stoppable{livestatusCollector, livestatusCache, nagiosCollector, nagfluxCollector}...)
		cleanUp(stoppables, resultQueues)
		quit <- true
	}()
loop:
	//Main loop
	for {
		select {
		case <-time.After(time.Duration(updateRate) * time.Second):
		/*queriesSend, measureTime, err := statisticUser.GetData("send")
			if err != nil {
				continue
			}
			idleTime := (measureTime.Seconds() - queriesSend.Time.Seconds() / float64(influx.AmountWorkers())) / updateRate
			log.Debugf("Buffer len: %d - Idletime in percent: %0.2f ", len(resultQueues[0]), idleTime * 100)

		//TODO: fix worker spawn by type
			if idleTime > 0.25 {
				influx.RemoveWorker()
			} else if idleTime < 0.1 && float64(len(resultQueues[0])) > resultQueueLength * 0.8 {
				influx.AddWorker()
			}*/
		case <-quit:
			break loop
		}
	}
}