Example #1
func (p *persistence) rebuildLabelIndexes(
	fpToSeries map[model.Fingerprint]*memorySeries,
) error {
	count := 0
	log.Info("Rebuilding label indexes.")
	log.Info("Indexing metrics in memory.")
	for fp, s := range fpToSeries {
		p.indexMetric(fp, s.metric)
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
	}
	log.Info("Indexing archived metrics.")
	var fp codable.Fingerprint
	var m codable.Metric
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		if err := kv.Key(&fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		p.indexMetric(model.Fingerprint(fp), model.Metric(m))
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
		return nil
	}); err != nil {
		return err
	}
	log.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
	return nil
}
func (t *StorageQueueManager) runShard(i int) {
	defer t.wg.Done()
	shard := t.shards[i]

	// Send batches of at most MaxSamplesPerSend samples to the remote storage.
	// If we have fewer samples than that, flush them out after a deadline
	// anyway.
	pendingSamples := model.Samples{}

	for {
		select {
		case s, ok := <-shard:
			if !ok {
				if len(pendingSamples) > 0 {
					log.Infof("Flushing %d samples to remote storage...", len(pendingSamples))
					t.sendSamples(pendingSamples)
					log.Infof("Done flushing.")
				}
				return
			}

			pendingSamples = append(pendingSamples, s)

			for len(pendingSamples) >= t.cfg.MaxSamplesPerSend {
				t.sendSamples(pendingSamples[:t.cfg.MaxSamplesPerSend])
				pendingSamples = pendingSamples[t.cfg.MaxSamplesPerSend:]
			}
		case <-time.After(t.cfg.BatchSendDeadline):
			if len(pendingSamples) > 0 {
				t.sendSamples(pendingSamples)
				pendingSamples = pendingSamples[:0]
			}
		}
	}
}
Example #3
// Run continuously sends samples to the remote storage.
func (t *StorageQueueManager) Run() {
	defer func() {
		close(t.drained)
	}()

	// Send batches of at most maxSamplesPerSend samples to the remote storage.
	// If we have fewer samples than that, flush them out after a deadline
	// anyway.
	for {
		select {
		case s, ok := <-t.queue:
			if !ok {
				log.Infof("Flushing %d samples to remote storage...", len(t.pendingSamples))
				t.flush()
				log.Infof("Done flushing.")
				return
			}

			t.pendingSamples = append(t.pendingSamples, s)

			for len(t.pendingSamples) >= maxSamplesPerSend {
				t.sendSamples(t.pendingSamples[:maxSamplesPerSend])
				t.pendingSamples = t.pendingSamples[maxSamplesPerSend:]
			}
		case <-time.After(batchSendDeadline):
			t.flush()
		}
	}
}
Example #4
func main() {
	flag.Parse()

	if *printCollectors {
		collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
		for n := range collector.Factories {
			collectorNames = append(collectorNames, n)
		}
		collectorNames.Sort()
		fmt.Printf("Available collectors:\n")
		for _, n := range collectorNames {
			fmt.Printf(" - %s\n", n)
		}
		return
	}
	collectors, err := loadCollectors()
	if err != nil {
		log.Fatalf("Couldn't load collectors: %s", err)
	}

	log.Infof("Enabled collectors:")
	for n := range collectors {
		log.Infof(" - %s", n)
	}

	nodeCollector := NodeCollector{collectors: collectors}
	prometheus.MustRegister(nodeCollector)

	sigUsr1 := make(chan os.Signal, 1)
	signal.Notify(sigUsr1, syscall.SIGUSR1)

	handler := prometheus.Handler()
	if *authUser != "" || *authPass != "" {
		if *authUser == "" || *authPass == "" {
			log.Fatal("You need to specify -auth.user and -auth.pass to enable basic auth")
		}
		handler = &basicAuthHandler{
			handler:  prometheus.Handler().ServeHTTP,
			user:     *authUser,
			password: *authPass,
		}
	}

	http.Handle(*metricsPath, handler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
			<head><title>Node Exporter</title></head>
			<body>
			<h1>Node Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>`))
	})

	log.Infof("Starting node_exporter v%s at %s", Version, *listenAddress)
	err = http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
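The basicAuthHandler used above is defined elsewhere in node_exporter. A minimal sketch of what it could look like, assuming the field names from the struct literal in main() and standard-library BasicAuth parsing (the realm string and error text are illustrative):

type basicAuthHandler struct {
	handler  http.HandlerFunc
	user     string
	password string
}

// ServeHTTP rejects the request unless the supplied credentials match exactly,
// then delegates to the wrapped metrics handler.
func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	user, password, ok := r.BasicAuth()
	if !ok || user != h.user || password != h.password {
		w.Header().Set("WWW-Authenticate", `Basic realm="metrics"`)
		http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		return
	}
	h.handler(w, r)
}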
Example #5
func main() {
	var (
		listenAddress     = flag.String("web.listen-address", ":9100", "Address on which to expose metrics and web interface.")
		metricsPath       = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
		enabledCollectors = flag.String("collectors.enabled", filterAvailableCollectors(defaultCollectors), "Comma-separated list of collectors to use.")
		printCollectors   = flag.Bool("collectors.print", false, "If true, print available collectors and exit.")
	)
	flag.Parse()

	if *printCollectors {
		collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
		for n := range collector.Factories {
			collectorNames = append(collectorNames, n)
		}
		collectorNames.Sort()
		fmt.Printf("Available collectors:\n")
		for _, n := range collectorNames {
			fmt.Printf(" - %s\n", n)
		}
		return
	}
	collectors, err := loadCollectors(*enabledCollectors)
	if err != nil {
		log.Fatalf("Couldn't load collectors: %s", err)
	}

	log.Infof("Enabled collectors:")
	for n := range collectors {
		log.Infof(" - %s", n)
	}

	nodeCollector := NodeCollector{collectors: collectors}
	prometheus.MustRegister(nodeCollector)

	handler := prometheus.Handler()

	http.Handle(*metricsPath, handler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
			<head><title>Node Exporter</title></head>
			<body>
			<h1>Node Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>`))
	})

	log.Infof("Starting node_exporter v%s at %s", Version, *listenAddress)
	err = http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
func parseTinystatsFile() {

	if *ipv4TinystatsFile != "disabled" {
		statsFile, err := os.Open(*ipv4TinystatsFile)
		if err != nil {
			log.Infof("error opening file, skipping: %s", *ipv4TinystatsFile)
		} else {
			reader := bufio.NewReader(statsFile)
			line, err := reader.ReadString('\n')
			if err == nil {
				q := strings.Split(line, ":")

				ipv4QueryA, ipv4QueryNS, ipv4QueryCNAME,
					ipv4QuerySOA, ipv4QueryPTR, ipv4QueryHINFO,
					ipv4QueryMX, ipv4QueryTXT, ipv4QueryRP,
					ipv4QuerySIG, ipv4QueryKEY, ipv4QueryAAAA,
					ipv4QueryAXFR, ipv4QueryANY, ipv4QueryTOTAL,
					ipv4QueryOTHER, ipv4QueryNOTAUTH, ipv4QueryNOTIMPL,
					ipv4QueryBADCLASS, ipv4QueryNOQUERY = q[0], q[1],
					q[2], q[3], q[4], q[5], q[6], q[7], q[8],
					q[9], q[10], q[11], q[12], q[13], q[14],
					q[15], q[16], q[17], q[18], q[19]
			}
			// Close the stats file even if reading the first line failed.
			statsFile.Close()
		}
	}

	if *ipv6TinystatsFile != "disabled" {
		statsFile, err := os.Open(*ipv6TinystatsFile)
		if err != nil {
			log.Infof("error opening file, skipping: %s", *ipv6TinystatsFile)
		} else {
			reader := bufio.NewReader(statsFile)
			line, err := reader.ReadString('\n')
			if err == nil {
				q := strings.Split(line, ":")

				ipv6QueryA, ipv6QueryNS, ipv6QueryCNAME,
					ipv6QuerySOA, ipv6QueryPTR, ipv6QueryHINFO,
					ipv6QueryMX, ipv6QueryTXT, ipv6QueryRP,
					ipv6QuerySIG, ipv6QueryKEY, ipv6QueryAAAA,
					ipv6QueryAXFR, ipv6QueryANY, ipv6QueryTOTAL,
					ipv6QueryOTHER, ipv6QueryNOTAUTH, ipv6QueryNOTIMPL,
					ipv6QueryBADCLASS, ipv6QueryNOQUERY = q[0], q[1],
					q[2], q[3], q[4], q[5], q[6], q[7], q[8],
					q[9], q[10], q[11], q[12], q[13], q[14],
					q[15], q[16], q[17], q[18], q[19]
			}
			// Close the stats file even if reading the first line failed.
			statsFile.Close()
		}
	}
}
Example #7
func (e *Exporter) scrapeApps(json *gabs.Container, ch chan<- prometheus.Metric) {
	elements, _ := json.S("apps").Children()
	states := map[string]string{
		"running":    "tasksRunning",
		"staged":     "tasksStaged",
		"healthy":    "tasksHealthy",
		"unhealthy":  "tasksUnhealthy",
		"cpus":       "cpus",
		"mem_in_mb":  "mem",
		"disk_in_mb": "disk",
		"gpus":       "gpus",
		"avg_uptime": "taskStats.startedAfterLastScaling.stats.lifeTime.averageSeconds",
	}

	name := "app_instances"
	gauge, new := e.Gauges.Fetch(name, "Marathon app instance count", "app")
	if new {
		log.Infof("Added gauge %q\n", name)
	}
	gauge.Reset()

	for _, app := range elements {
		id := app.Path("id").Data().(string)
		data := app.Path("instances").Data()
		count, ok := data.(float64)
		if !ok {
			log.Debugf("Bad conversion! Unexpected value \"%v\" for number of app instances\n", data)
			continue
		}

		gauge.WithLabelValues(id).Set(count)

		for key, value := range states {
			name := fmt.Sprintf("app_task_%s", key)
			gauge, new := e.Gauges.Fetch(name, fmt.Sprintf("Marathon app task %s count", key), "app")
			if new {
				log.Infof("Added gauge %q\n", name)
			}

			data := app.Path(value).Data()
			count, ok := data.(float64)
			if !ok {
				log.Debugf("Bad conversion! Unexpected value \"%v\" for number of \"%s\" tasks\n", data, key)
				continue
			}

			gauge.WithLabelValues(id).Set(count)
		}
	}
}
Example #8
func reloadConfig(filename string, rls ...Reloadable) (err error) {
	log.Infof("Loading configuration file %s", filename)
	defer func() {
		if err == nil {
			configSuccess.Set(1)
			configSuccessTime.Set(float64(time.Now().Unix()))
		} else {
			configSuccess.Set(0)
		}
	}()

	conf, err := config.LoadFile(filename)
	if err != nil {
		return fmt.Errorf("couldn't load configuration (-config.file=%s): %v", filename, err)
	}

	failed := false
	for _, rl := range rls {
		if err := rl.ApplyConfig(conf); err != nil {
			log.Error("Failed to apply configuration: ", err)
			failed = true
		}
	}
	if failed {
		return fmt.Errorf("one or more errors occurred while applying the new configuration (-config.file=%s)", filename)
	}
	return nil
}
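The Reloadable interface that reloadConfig fans out to is not shown in these snippets. For this error-returning variant it is presumably something like the sketch below; the method name matches the call sites above, while the exact parameter type is an assumption:

// Reloadable is any component that can re-apply a freshly loaded configuration.
type Reloadable interface {
	ApplyConfig(*config.Config) error
}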
Example #9
// ListenAndServe starts the server.
func ListenAndServe(addr string, core core.Core, fe fs.FileExplorer) {
	log.Infof("REST listening at: http://%v", core.GetAddr())
	clientHandlers := clientAPI.NewHandler(core)
	mesosHandlers := mesosAPI.NewHandler(core)
	fsHandlers := fsAPI.NewHandler(fe)
	r := createRouter(core, clientHandlers, mesosHandlers, fsHandlers)

	m := martini.New()
	m.Use(cors.Allow(&cors.Options{
		AllowOrigins:     []string{"*"},
		AllowMethods:     []string{"POST", "GET", "PUT", "DELETE"},
		AllowHeaders:     []string{"Origin", "x-requested-with", "Content-Type", "Content-Range", "Content-Disposition", "Content-Description"},
		ExposeHeaders:    []string{"Content-Length"},
		AllowCredentials: false,
	}))
	m.Use(logger())
	m.Use(recovery())
	m.Use(martini.Static("static"))
	m.Use(martini.Static("temp", martini.StaticOptions{
		Prefix: "/context/",
	}))
	m.Use(martini.Static("executor", martini.StaticOptions{
		Prefix: "/executor/",
	}))
	m.Action(r.Handle)
	go m.RunOnAddr(addr)
}
Example #10
func reloadConfig(filename string, rls ...Reloadable) (err error) {
	log.Infof("Loading configuration file %s", filename)
	defer func() {
		if err == nil {
			configSuccess.Set(1)
			configSuccessTime.Set(float64(time.Now().Unix()))
		} else {
			configSuccess.Set(0)
		}
	}()

	conf, err := config.LoadFile(filename)
	if err != nil {
		return fmt.Errorf("couldn't load configuration (-config.file=%s): %v", filename, err)
	}

	// Apply all configs and return the first error if there were any.
	for _, rl := range rls {
		if err == nil {
			err = rl.ApplyConfig(conf)
		} else {
			rl.ApplyConfig(conf)
		}
	}
	return err
}
Example #11
func watchConfig(fileName string, mapper *metricMapper) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}

	err = watcher.WatchFlags(fileName, fsnotify.FSN_MODIFY)
	if err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-watcher.Event:
			log.Infof("Config file changed (%s), attempting reload", ev)
			err = mapper.initFromFile(fileName)
			if err != nil {
				log.Errorln("Error reloading config:", err)
				configLoads.WithLabelValues("failure").Inc()
			} else {
				log.Infoln("Config reloaded successfully")
				configLoads.WithLabelValues("success").Inc()
			}
			// Re-add the file watcher since it can get lost on some changes. E.g.
			// saving a file with vim results in a RENAME-MODIFY-DELETE event
			// sequence, after which the newly written file is no longer watched.
			err = watcher.WatchFlags(fileName, fsnotify.FSN_MODIFY)
		case err := <-watcher.Error:
			log.Errorln("Error watching config:", err)
		}
	}
}
func main() {
	var (
		listenAddress = flag.String("listen-address", ":9120", "Address to listen on for web interface and telemetry.")
		metricsPath   = flag.String("metric-path", "/metrics", "Path under which to expose metrics.")
		apiURL        = flag.String("api-url", "http://localhost:8001/", "Base-URL of PowerDNS authoritative server/recursor API.")
		apiKey        = flag.String("api-key", "", "PowerDNS API Key")
	)
	flag.Parse()

	hostURL, err := url.Parse(*apiURL)
	if err != nil {
		log.Fatalf("Error parsing api-url: %v", err)
	}

	server, err := getServerInfo(hostURL, *apiKey)
	if err != nil {
		log.Fatalf("Could not fetch PowerDNS server info: %v", err)
	}

	exporter := NewExporter(*apiKey, server.DaemonType, hostURL)
	prometheus.MustRegister(exporter)

	log.Infof("Starting Server: %s", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
             <head><title>PowerDNS Exporter</title></head>
             <body>
             <h1>PowerDNS Exporter</h1>
             <p><a href='` + *metricsPath + `'>Metrics</a></p>
             </body>
             </html>`))
	})
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Example #13
// This mirrors decodeValue in gosnmp's helper.go.
func pduValueAsString(pdu *gosnmp.SnmpPDU) string {
	switch pdu.Value.(type) {
	case int:
		return strconv.Itoa(pdu.Value.(int))
	case uint:
		return strconv.FormatUint(uint64(pdu.Value.(uint)), 10)
	case int64:
		return strconv.FormatInt(pdu.Value.(int64), 10)
	case string:
		if pdu.Type == gosnmp.ObjectIdentifier {
			// Trim leading period.
			return pdu.Value.(string)[1:]
		}
		return pdu.Value.(string)
	case []byte:
		// OctetString
		return string(pdu.Value.([]byte))
	case nil:
		return ""
	default:
		// This shouldn't happen.
		log.Infof("Got PDU with unexpected type: Name: %s Value: '%s', Go Type: %T SNMP Type: %s", pdu.Name, pdu.Value, pdu.Value, pdu.Type)
		snmpUnexpectedPduType.Inc()
		return fmt.Sprintf("%s", pdu.Value)
	}
}
Example #14
func reloadConfig(filename string, rls ...Reloadable) (success bool) {
	log.Infof("Loading configuration file %s", filename)
	defer func() {
		if success {
			configSuccess.Set(1)
			configSuccessTime.Set(float64(time.Now().Unix()))
		} else {
			configSuccess.Set(0)
		}
	}()

	conf, err := config.LoadFile(filename)
	if err != nil {
		log.Errorf("Couldn't load configuration (-config.file=%s): %v", filename, err)
		// TODO(julius): Remove this notice when releasing 0.17.0 or 0.18.0.
		if err.Error() == "unknown fields in global config: labels" {
			log.Errorf("NOTE: The 'labels' setting in the global configuration section has been renamed to 'external_labels' and now has changed semantics (see release notes at https://github.com/prometheus/prometheus/blob/master/CHANGELOG.md). Please update your configuration file accordingly.")
		}
		return false
	}
	success = true

	for _, rl := range rls {
		// Apply the config to every reloadable, even after a failure,
		// rather than short-circuiting on the first false result.
		if !rl.ApplyConfig(conf) {
			success = false
		}
	}
	return success
}
Example #15
func main() {
	flag.Parse()

	if *printCollectors {
		collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
		for n := range collector.Factories {
			collectorNames = append(collectorNames, n)
		}
		collectorNames.Sort()
		fmt.Printf("Available collectors:\n")
		for _, n := range collectorNames {
			fmt.Printf(" - %s\n", n)
		}
		return
	}
	collectors, err := loadCollectors()
	if err != nil {
		log.Fatalf("Couldn't load collectors: %s", err)
	}

	log.Infof("Enabled collectors:")
	for n := range collectors {
		log.Infof(" - %s", n)
	}

	nodeCollector := NodeCollector{collectors: collectors}
	prometheus.MustRegister(nodeCollector)

	handler := prometheus.Handler()

	http.Handle(*metricsPath, handler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
			<head><title>Node Exporter</title></head>
			<body>
			<h1>Node Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>`))
	})

	log.Infof("Starting node_exporter v%s at %s", Version, *listenAddress)
	err = http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
Example #16
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete.
func (t *StorageQueueManager) Stop() {
	log.Infof("Stopping remote storage...")
	for _, shard := range t.shards {
		close(shard)
	}
	t.wg.Wait()
	log.Info("Remote storage stopped.")
}
Example #17
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete.
func (t *StorageQueueManager) Stop() {
	log.Infof("Stopping remote storage...")
	close(t.queue)
	<-t.drained
	for i := 0; i < maxConcurrentSends; i++ {
		t.sendSemaphore <- true
	}
	log.Info("Remote storage stopped.")
}
Example #18
// Run serves the HTTP endpoints.
func (h *Handler) Run() {
	log.Infof("Listening on %s", h.options.ListenAddress)
	server := &http.Server{
		Addr:     h.options.ListenAddress,
		Handler:  h.router,
		ErrorLog: log.NewErrorLogger(),
	}
	h.listenErrCh <- server.ListenAndServe()
}
Example #19
func main() {
	flag.Parse()

	exporter := NewExporter(*scrapeURI)
	prometheus.MustRegister(exporter)

	log.Infof("Starting Server: %s", *listeningAddress)
	http.Handle(*metricsEndpoint, prometheus.Handler())
	log.Fatal(http.ListenAndServe(*listeningAddress, nil))
}
Example #20
// Start implements Storage.
func (s *memorySeriesStorage) Start() (err error) {
	var syncStrategy syncStrategy
	switch s.options.SyncStrategy {
	case Never:
		syncStrategy = func() bool { return false }
	case Always:
		syncStrategy = func() bool { return true }
	case Adaptive:
		syncStrategy = func() bool { return s.calculatePersistenceUrgencyScore() < 1 }
	default:
		panic("unknown sync strategy")
	}

	var p *persistence
	p, err = newPersistence(
		s.options.PersistenceStoragePath,
		s.options.Dirty, s.options.PedanticChecks,
		syncStrategy,
		s.options.MinShrinkRatio,
	)
	if err != nil {
		return err
	}
	s.persistence = p
	// Persistence must start running before loadSeriesMapAndHeads() is called.
	go s.persistence.run()

	defer func() {
		if err != nil {
			if e := p.close(); e != nil {
				log.Errorln("Error closing persistence:", e)
			}
		}
	}()

	log.Info("Loading series map and head chunks...")
	s.fpToSeries, s.numChunksToPersist, err = p.loadSeriesMapAndHeads()
	if err != nil {
		return err
	}
	log.Infof("%d series loaded.", s.fpToSeries.length())
	s.numSeries.Set(float64(s.fpToSeries.length()))

	s.mapper, err = newFPMapper(s.fpToSeries, p)
	if err != nil {
		return err
	}

	go s.handleEvictList()
	go s.handleQuarantine()
	go s.logThrottling()
	go s.loop()

	return nil
}
Example #21
func (dms *DiskMetricStore) loop(persistenceInterval time.Duration) {
	lastPersist := time.Now()
	persistScheduled := false
	lastWrite := time.Time{}
	persistDone := make(chan time.Time)
	var persistTimer *time.Timer

	checkPersist := func() {
		if !persistScheduled && lastWrite.After(lastPersist) {
			persistTimer = time.AfterFunc(
				persistenceInterval-lastWrite.Sub(lastPersist),
				func() {
					persistStarted := time.Now()
					if err := dms.persist(); err != nil {
						log.Errorln("Error persisting metrics:", err)
					} else {
						log.Infof(
							"Metrics persisted to '%s'.",
							dms.persistenceFile,
						)
					}
					persistDone <- persistStarted
				},
			)
			persistScheduled = true
		}
	}

	for {
		select {
		case wr := <-dms.writeQueue:
			dms.processWriteRequest(wr)
			lastWrite = time.Now()
			checkPersist()
		case lastPersist = <-persistDone:
			persistScheduled = false
			checkPersist() // In case something has been written in the meantime.
		case <-dms.drain:
			// Prevent a scheduled persist from firing later.
			if persistTimer != nil {
				persistTimer.Stop()
			}
			// Now draining...
			for {
				select {
				case wr := <-dms.writeQueue:
					dms.processWriteRequest(wr)
				default:
					dms.done <- dms.persist()
					return
				}
			}
		}
	}
}
Example #22
// maybeAddMapping is only used internally. It takes a detected collision and
// adds it to the collisions map if not yet there. In any case, it returns the
// truly unique fingerprint for the colliding metric.
func (m *fpMapper) maybeAddMapping(
	fp model.Fingerprint,
	collidingMetric model.Metric,
) (model.Fingerprint, error) {
	ms := metricToUniqueString(collidingMetric)
	m.mtx.RLock()
	mappedFPs, ok := m.mappings[fp]
	m.mtx.RUnlock()
	if ok {
		// fp is locked by the caller, so no further locking required.
		mappedFP, ok := mappedFPs[ms]
		if ok {
			return mappedFP, nil // Existing mapping.
		}
		// A new mapping has to be created.
		mappedFP = m.nextMappedFP()
		mappedFPs[ms] = mappedFP
		m.mtx.Lock()
		// Checkpoint mappings after each change.
		err := m.p.checkpointFPMappings(m.mappings)
		m.mtx.Unlock()
		log.Infof(
			"Collision detected for fingerprint %v, metric %v, mapping to new fingerprint %v.",
			fp, collidingMetric, mappedFP,
		)
		return mappedFP, err
	}
	// This is the first collision for fp.
	mappedFP := m.nextMappedFP()
	mappedFPs = map[string]model.Fingerprint{ms: mappedFP}
	m.mtx.Lock()
	m.mappings[fp] = mappedFPs
	m.mappingsCounter.Inc()
	// Checkpoint mappings after each change.
	err := m.p.checkpointFPMappings(m.mappings)
	m.mtx.Unlock()
	log.Infof(
		"Collision detected for fingerprint %v, metric %v, mapping to new fingerprint %v.",
		fp, collidingMetric, mappedFP,
	)
	return mappedFP, err
}
Example #23
func (e *Exporter) scrapeTimers(json *gabs.Container) {
	elements, _ := json.ChildrenMap()
	for key, element := range elements {
		new, err := e.scrapeTimer(key, element)
		if err != nil {
			log.Debug(err)
		} else if new {
			log.Infof("Added timer %q\n", key)
		}
	}
}
func main() {
	flag.Parse()
	exporter := NewTinystatsExporter()
	prometheus.MustRegister(exporter)
	http.Handle(*metricsPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
                <head><title>Tinystats Exporter</title></head>
                <body>
                   <h1>Tinystats Exporter</h1>
                   <p><a href='` + *metricsPath + `'>Metrics</a></p>
                   </body>
                </html>
              `))
	})
	log.Infof("Starting Server: %s", *listenAddress)
	log.Infof("ipv4tinystats file: %s", *ipv4TinystatsFile)
	log.Infof("ipv6tinystats file: %s", *ipv6TinystatsFile)
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Example #25
func (c *graphiteCollector) processLine(line string) {
	parts := strings.Split(line, " ")
	if len(parts) != 3 {
		log.Infof("Invalid part count of %d in line: %s", len(parts), line)
		return
	}
	var name string
	labels, present := c.mapper.getMapping(parts[0])
	if present {
		name = labels["name"]
		delete(labels, "name")
	} else {
		// If graphite.mapping-strict-match flag is set, we will drop this metric.
		if *strictMatch {
			return
		}
		name = invalidMetricChars.ReplaceAllString(parts[0], "_")
	}

	value, err := strconv.ParseFloat(parts[1], 64)
	if err != nil {
		log.Infof("Invalid value in line: %s", line)
		return
	}
	timestamp, err := strconv.ParseFloat(parts[2], 64)
	if err != nil {
		log.Infof("Invalid timestamp in line: %s", line)
		return
	}
	sample := graphiteSample{
		OriginalName: parts[0],
		Name:         name,
		Value:        value,
		Labels:       labels,
		Type:         prometheus.GaugeValue,
		Help:         fmt.Sprintf("Graphite metric %s", parts[0]),
		Timestamp:    time.Unix(int64(timestamp), int64(math.Mod(timestamp, 1.0)*1e9)),
	}
	lastProcessed.Set(float64(time.Now().UnixNano()) / 1e9)
	c.ch <- &sample
}
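The graphiteSample type filled in by processLine is not included here. Its field names can be read off the struct literal above; the field types below are assumptions consistent with those call sites:

// graphiteSample carries one parsed Graphite line until the collector
// converts it into a Prometheus metric.
type graphiteSample struct {
	OriginalName string
	Name         string
	Labels       map[string]string
	Help         string
	Value        float64
	Type         prometheus.ValueType
	Timestamp    time.Time
}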
// Collect fetches the tinystats counters and delivers them
// as Prometheus metrics. It implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	e.mutex.Lock() // To protect metrics from concurrent collects.
	defer e.mutex.Unlock()
	if err := e.scrape(ch); err != nil {
		log.Infof("Error scraping tinystats: %s", err)
	}
	e.ipv4QueryA.Collect(ch)
	e.ipv4QueryNS.Collect(ch)
	e.ipv4QueryCNAME.Collect(ch)
	e.ipv4QuerySOA.Collect(ch)
	e.ipv4QueryPTR.Collect(ch)
	e.ipv4QueryHINFO.Collect(ch)
	e.ipv4QueryMX.Collect(ch)
	e.ipv4QueryTXT.Collect(ch)
	e.ipv4QueryRP.Collect(ch)
	e.ipv4QuerySIG.Collect(ch)
	e.ipv4QueryKEY.Collect(ch)
	e.ipv4QueryAAAA.Collect(ch)
	e.ipv4QueryAXFR.Collect(ch)
	e.ipv4QueryANY.Collect(ch)
	e.ipv4QueryTOTAL.Collect(ch)
	e.ipv4QueryOTHER.Collect(ch)
	e.ipv4QueryNOTAUTH.Collect(ch)
	e.ipv4QueryNOTIMPL.Collect(ch)
	e.ipv4QueryBADCLASS.Collect(ch)
	e.ipv4QueryNOQUERY.Collect(ch)

	e.ipv6QueryA.Collect(ch)
	e.ipv6QueryNS.Collect(ch)
	e.ipv6QueryCNAME.Collect(ch)
	e.ipv6QuerySOA.Collect(ch)
	e.ipv6QueryPTR.Collect(ch)
	e.ipv6QueryHINFO.Collect(ch)
	e.ipv6QueryMX.Collect(ch)
	e.ipv6QueryTXT.Collect(ch)
	e.ipv6QueryRP.Collect(ch)
	e.ipv6QuerySIG.Collect(ch)
	e.ipv6QueryKEY.Collect(ch)
	e.ipv6QueryAAAA.Collect(ch)
	e.ipv6QueryAXFR.Collect(ch)
	e.ipv6QueryANY.Collect(ch)
	e.ipv6QueryTOTAL.Collect(ch)
	e.ipv6QueryOTHER.Collect(ch)
	e.ipv6QueryNOTAUTH.Collect(ch)
	e.ipv6QueryNOTIMPL.Collect(ch)
	e.ipv6QueryBADCLASS.Collect(ch)
	e.ipv6QueryNOQUERY.Collect(ch)

	return
}
Example #27
// NewTextFileCollector returns a new textfile collector and registers a
// SetMetricFamilyInjectionHook.
func NewTextFileCollector() (Collector, error) {
	c := &textFileCollector{
		path: *textFileDirectory,
	}

	if c.path == "" {
		// This collector is enabled by default, so do not fail if
		// the flag is not passed.
		log.Infof("No directory specified, see --collector.textfile.directory")
	} else {
		prometheus.SetMetricFamilyInjectionHook(c.parseTextFiles)
	}

	return c, nil
}
func main() {
	var (
		listenAddress             = flag.String("web.listen-address", ":9101", "Address to listen on for web interface and telemetry.")
		metricsPath               = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
		haProxyScrapeURI          = flag.String("haproxy.scrape-uri", "http://localhost/;csv", "URI on which to scrape HAProxy.")
		haProxyServerMetricFields = flag.String("haproxy.server-metric-fields", serverMetrics.String(), "Comma-separated list of exported server metrics. See http://cbonte.github.io/haproxy-dconv/configuration-1.5.html#9.1")
		haProxyTimeout            = flag.Duration("haproxy.timeout", 5*time.Second, "Timeout for trying to get stats from HAProxy.")
		haProxyPidFile            = flag.String("haproxy.pid-file", "", "Path to haproxy's pid file.")
	)
	flag.Parse()

	selectedServerMetrics, err := filterServerMetrics(*haProxyServerMetricFields)
	if err != nil {
		log.Fatal(err)
	}

	exporter := NewExporter(*haProxyScrapeURI, selectedServerMetrics, *haProxyTimeout)
	prometheus.MustRegister(exporter)

	if *haProxyPidFile != "" {
		procExporter := prometheus.NewProcessCollectorPIDFn(
			func() (int, error) {
				content, err := ioutil.ReadFile(*haProxyPidFile)
				if err != nil {
					return 0, fmt.Errorf("Can't read pid file: %s", err)
				}
				value, err := strconv.Atoi(strings.TrimSpace(string(content)))
				if err != nil {
					return 0, fmt.Errorf("Can't parse pid file: %s", err)
				}
				return value, nil
			}, namespace)
		prometheus.MustRegister(procExporter)
	}

	log.Infof("Starting Server: %s", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
             <head><title>Haproxy Exporter</title></head>
             <body>
             <h1>Haproxy Exporter</h1>
             <p><a href='` + *metricsPath + `'>Metrics</a></p>
             </body>
             </html>`))
	})
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Example #29
// Run serves the HTTP endpoints.
func (h *Handler) Run() {
	log.Infof("Listening on %s", h.options.ListenAddress)
	server := &http.Server{
		Addr:        h.options.ListenAddress,
		Handler:     h.router,
		ErrorLog:    log.NewErrorLogger(),
		ReadTimeout: h.options.ReadTimeout,
	}
	listener, err := net.Listen("tcp", h.options.ListenAddress)
	if err != nil {
		h.listenErrCh <- err
	} else {
		limitedListener := netutil.LimitListener(listener, h.options.MaxConnections)
		h.listenErrCh <- server.Serve(limitedListener)
	}
}
Example #30
File: main.go Project: xperimental/yovpn
func main() {
	config := createConfig()

	log.Info("Create provisioner...")
	provisioner, err := provisioner.NewProvisioner(config.Token)
	if err != nil {
		log.Fatal(err)
	}
	go backgroundRunner(provisioner)

	log.Info("Create server...")
	server := web.CreateServer(provisioner)

	log.Infof("Listen on %s", config.Port)
	log.Fatal(http.ListenAndServe(":"+config.Port, server))
}