Code example #1
File: queue_manager.go Project: bluecmd/prometheus
// Run continuously sends samples to the remote storage.
func (t *StorageQueueManager) Run() {
	defer func() {
		close(t.drained)
	}()

	// Send batches of at most maxSamplesPerSend samples to the remote storage.
	// If we have fewer samples than that, flush them out after a deadline
	// anyway.
	for {
		select {
		case s, ok := <-t.queue:
			if !ok {
				log.Infof("Flushing %d samples to remote storage...", len(t.pendingSamples))
				t.flush()
				log.Infof("Done flushing.")
				return
			}

			t.pendingSamples = append(t.pendingSamples, s)

			for len(t.pendingSamples) >= maxSamplesPerSend {
				go t.sendSamples(t.pendingSamples[:maxSamplesPerSend])
				t.pendingSamples = t.pendingSamples[maxSamplesPerSend:]
			}
		case <-time.After(batchSendDeadline):
			t.flush()
		}
	}
}
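
The flush method called above is not part of this snippet; a minimal sketch of what it might look like, assuming it simply hands any remaining pending samples to sendSamples and resets the buffer:

// Hypothetical sketch, not necessarily the project's actual implementation.
func (t *StorageQueueManager) flush() {
	if len(t.pendingSamples) > 0 {
		go t.sendSamples(t.pendingSamples)
	}
	t.pendingSamples = t.pendingSamples[:0]
}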
Code example #2
File: serverset.go Project: bluecmd/prometheus
func (tc *zookeeperTreeCache) loop(failureMode bool) {
	retryChan := make(chan struct{})

	failure := func() {
		failureMode = true
		time.AfterFunc(time.Second*10, func() {
			retryChan <- struct{}{}
		})
	}
	if failureMode {
		failure()
	}

	for {
		select {
		case ev := <-tc.head.events:
			log.Debugf("Received Zookeeper event: %s", ev)
			if failureMode {
				continue
			}
			if ev.Type == zk.EventNotWatching {
				log.Infof("Lost connection to Zookeeper.")
				failure()
			} else {
				path := strings.TrimPrefix(ev.Path, tc.prefix)
				parts := strings.Split(path, "/")
				node := tc.head
				for _, part := range parts[1:] {
					childNode := node.children[part]
					if childNode == nil {
						childNode = &zookeeperTreeCacheNode{
							events:   tc.head.events,
							children: map[string]*zookeeperTreeCacheNode{},
							done:     make(chan struct{}, 1),
						}
						node.children[part] = childNode
					}
					node = childNode
				}
				err := tc.recursiveNodeUpdate(ev.Path, node)
				if err != nil {
					log.Errorf("Error during processing of Zookeeper event: %s", err)
					failure()
				}
			}
		case <-retryChan:
			log.Infof("Attempting to resync state with Zookeeper")
			err := tc.recursiveNodeUpdate(tc.prefix, tc.head)
			if err == nil {
				failureMode = false
			} else {
				log.Errorf("Error during Zookeeper resync: %s", err)
				failure()
			}
		case <-tc.stop:
			close(tc.events)
			return
		}
	}
}
Code example #3
func (p *persistence) rebuildLabelIndexes(
	fpToSeries map[clientmodel.Fingerprint]*memorySeries,
) error {
	count := 0
	log.Info("Rebuilding label indexes.")
	log.Info("Indexing metrics in memory.")
	for fp, s := range fpToSeries {
		p.indexMetric(fp, s.metric)
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
	}
	log.Info("Indexing archived metrics.")
	var fp codable.Fingerprint
	var m codable.Metric
	if err := p.archivedFingerprintToMetrics.ForEach(func(kv index.KeyValueAccessor) error {
		if err := kv.Key(&fp); err != nil {
			return err
		}
		if err := kv.Value(&m); err != nil {
			return err
		}
		p.indexMetric(clientmodel.Fingerprint(fp), clientmodel.Metric(m))
		count++
		if count%10000 == 0 {
			log.Infof("%d metrics queued for indexing.", count)
		}
		return nil
	}); err != nil {
		return err
	}
	log.Info("All requests for rebuilding the label indexes queued. (Actual processing may lag behind.)")
	return nil
}
Code example #4
File: node_exporter.go Project: ra1fh/node_exporter
func main() {
	flag.Parse()

	if *printCollectors {
		collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
		for n := range collector.Factories {
			collectorNames = append(collectorNames, n)
		}
		collectorNames.Sort()
		fmt.Printf("Available collectors:\n")
		for _, n := range collectorNames {
			fmt.Printf(" - %s\n", n)
		}
		return
	}
	collectors, err := loadCollectors()
	if err != nil {
		log.Fatalf("Couldn't load collectors: %s", err)
	}

	log.Infof("Enabled collectors:")
	for n := range collectors {
		log.Infof(" - %s", n)
	}

	nodeCollector := NodeCollector{collectors: collectors}
	prometheus.MustRegister(nodeCollector)

	sigUsr1 := make(chan os.Signal)
	signal.Notify(sigUsr1, syscall.SIGUSR1)

	handler := prometheus.Handler()
	if *authUser != "" || *authPass != "" {
		if *authUser == "" || *authPass == "" {
			log.Fatal("You need to specify -auth.user and -auth.pass to enable basic auth")
		}
		handler = &basicAuthHandler{
			handler:  prometheus.Handler().ServeHTTP,
			user:     *authUser,
			password: *authPass,
		}
	}

	http.Handle(*metricsPath, handler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
			<head><title>Node Exporter</title></head>
			<body>
			<h1>Node Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>`))
	})

	log.Infof("Starting node_exporter v%s at %s", Version, *listenAddress)
	err = http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
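
The basicAuthHandler type used above (and again in example #10) is not shown in these snippets. A hedged sketch of how such a wrapper could look; the field names match the struct literal above, while the ServeHTTP logic is an assumption:

// Hypothetical sketch of a basic-auth wrapper around an http.HandlerFunc.
type basicAuthHandler struct {
	handler  http.HandlerFunc
	user     string
	password string
}

func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	user, password, ok := r.BasicAuth()
	if !ok || user != h.user || password != h.password {
		w.Header().Set("WWW-Authenticate", `Basic realm="metrics"`)
		http.Error(w, "Invalid username or password", http.StatusUnauthorized)
		return
	}
	h.handler(w, r)
}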
Code example #5
func (e *Exporter) setMetrics(services <-chan []*consul_api.ServiceEntry, checks <-chan []*consul_api.HealthCheck) {

	// Each service will be an array of ServiceEntry structs.
	running := true
	for running {
		select {
		case service, b := <-services:
			running = b
			if len(service) == 0 {
				// Not sure this should ever happen, but catch it just in case...
				continue
			}

			// We should have one ServiceEntry per node, so use that for total nodes.
			e.serviceNodesTotal.WithLabelValues(service[0].Service.Service).Set(float64(len(service)))

			for _, entry := range service {
				// We have a Node, a Service, and one or more Checks. Our
				// service-node combo is passing if all checks have a `status`
				// of "passing."

				passing := 1

				for _, hc := range entry.Checks {
					if hc.Status != consul.HealthPassing {
						passing = 0
						break
					}
				}

				log.Infof("%v/%v status is %v", entry.Service.Service, entry.Node.Node, passing)

				e.serviceNodesHealthy.WithLabelValues(entry.Service.Service, entry.Node.Node).Set(float64(passing))
			}
		case entry, b := <-checks:
			running = b
			for _, hc := range entry {
				passing := 1
				if hc.ServiceID == "" {
					if hc.Status != consul.HealthPassing {
						passing = 0
					}
					e.nodeChecks.WithLabelValues(hc.CheckID, hc.Node).Set(float64(passing))
					log.Infof("CHECKS: %v/%v status is %d", hc.CheckID, hc.Node, passing)
				}
			}
		}
	}

}
Code example #6
func Execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
	begin := time.Now()
	err := c.Update(ch)
	duration := time.Since(begin)
	var result string

	if err != nil {
		log.Infof("ERROR: %s failed after %fs: %s", name, duration.Seconds(), err)
		result = "error"
	} else {
		log.Infof("OK: %s success after %fs.", name, duration.Seconds())
		result = "success"
	}
	scrapeDurations.WithLabelValues(name, result).Observe(duration.Seconds())
}
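
The scrapeDurations collector is defined elsewhere; judging from the WithLabelValues(name, result) call it is a vector metric with two labels. A hypothetical definition consistent with that usage (metric name and help text are assumptions):

// Hypothetical definition matching the WithLabelValues(name, result) call above.
var scrapeDurations = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name: "node_exporter_scrape_duration_seconds",
		Help: "Duration of a collector scrape, partitioned by collector and result.",
	},
	[]string{"collector", "result"},
)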
Code example #7
File: main.go Project: mjseid/prometheus
func reloadConfig(filename string, rls ...Reloadable) (success bool) {
	log.Infof("Loading configuration file %s", filename)
	defer func() {
		if success {
			configSuccess.Set(1)
			configSuccessTime.Set(float64(time.Now().Unix()))
		} else {
			configSuccess.Set(0)
		}
	}()

	conf, err := config.LoadFile(filename)
	if err != nil {
		log.Errorf("Couldn't load configuration (-config.file=%s): %v", filename, err)
		// TODO(julius): Remove this notice when releasing 0.17.0 or 0.18.0.
		if err.Error() == "unknown fields in global config: labels" {
			log.Errorf("NOTE: The 'labels' setting in the global configuration section has been renamed to 'external_labels' and now has changed semantics (see release notes at https://github.com/prometheus/prometheus/blob/master/CHANGELOG.md). Please update your configuration file accordingly.")
		}
		return false
	}
	success = true

	for _, rl := range rls {
		success = success && rl.ApplyConfig(conf)
	}
	return success
}
Code example #8
func main() {
	flag.Parse()

	handler := prometheus.Handler()
	prometheus.MustRegister(watts)
	prometheus.MustRegister(updatesPerPost)
	prometheus.MustRegister(voltage)

	http.Handle(*metricsPath, handler)
	http.HandleFunc("/activate", activateHandler)
	http.HandleFunc("/post", postHandler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
			<head><title>TED Exporter</title></head>
			<body>
			<h1>TED Exporter</h1>
			<p><a href="` + *metricsPath + `">Metrics</a></p>
			</body>
			</html>`))
	})

	log.Infof("Starting ted_exporter v%s at %s", Version, *listenAddress)
	err := http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}
Code example #9
func main() {
	flag.Parse()

	dsn := os.Getenv("DATA_SOURCE_NAME")
	if len(dsn) == 0 {
		log.Fatal("couldn't find environment variable DATA_SOURCE_NAME")
	}

	exporter := NewMySQLExporter(dsn)
	prometheus.MustRegister(exporter)
	http.Handle(*metricPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
<head><title>MySQLd exporter</title></head>
<body>
<h1>MySQLd exporter</h1>
<p><a href='` + *metricPath + `'>Metrics</a></p>
</body>
</html>
`))
	})

	log.Infof("Starting Server: %s", *listenAddress)
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Code example #10
func main() {
	flag.Parse()

	dsn := os.Getenv("DATA_SOURCE_NAME")
	if len(dsn) == 0 {
		log.Fatal("couldn't find environment variable DATA_SOURCE_NAME")
	}

	exporter := NewExporter(dsn)
	prometheus.MustRegister(exporter)

	handler := prometheus.Handler()
	if *authUser != "" || *authPass != "" {
		if *authUser == "" || *authPass == "" {
			log.Fatal("You need to specify -auth.user and -auth.pass to enable basic auth")
		}
		handler = &basicAuthHandler{
			handler:  prometheus.Handler().ServeHTTP,
			user:     *authUser,
			password: *authPass,
		}
	}
	http.Handle(*metricPath, handler)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write(landingPage)
	})

	log.Infof("Starting Server: %s", *listenAddress)
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Code example #11
func main() {
	var (
		listenAddress = flag.String("web.listen-address", ":9107", "Address to listen on for web interface and telemetry.")
		metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
		consulServer  = flag.String("consul.server", "localhost:8500", "HTTP API address of a Consul server or agent.")
		kvPrefix      = flag.String("kv.prefix", "", "Prefix from which to expose key/value pairs.")
		kvFilter      = flag.String("kv.filter", ".*", "Regex that determines which keys to expose.")
	)
	flag.Parse()

	exporter := NewExporter(*consulServer, *kvPrefix, *kvFilter)
	prometheus.MustRegister(exporter)

	log.Infof("Starting Server: %s", *listenAddress)
	http.Handle(*metricsPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
             <head><title>Consul Exporter</title></head>
             <body>
             <h1>Consul Exporter</h1>
             <p><a href='` + *metricsPath + `'>Metrics</a></p>
             </body>
             </html>`))
	})
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Code example #12
File: manager.go Project: robbiet480/alertmanager
// Recomputes all currently uninhibited/unsilenced alerts and queues
// notifications for them according to their RepeatRate.
func (s *memoryAlertManager) refreshNotifications() {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.needsNotificationRefresh = false

	l := s.filteredLabelSets(false)

	numSent := 0
	for _, lb := range l {
		agg := s.aggregates[lb.Fingerprint()]
		if agg.NextNotification.After(time.Now()) {
			continue
		}
		if agg.Rule != nil {
			s.notifier.QueueNotification(agg.Alert, notificationOpTrigger, agg.Rule.NotificationConfigName)
			agg.LastNotification = time.Now()
			agg.NextNotification = agg.LastNotification.Add(agg.Rule.RepeatRate)
			numSent++
		}
	}
	if numSent > 0 {
		log.Infof("Sent %d notifications", numSent)
		heap.Init(&s.aggregatesByNextNotification)
	}
}
Code example #13
File: watcher.go Project: robbiet480/alertmanager
func (w *fileWatcher) Watch(cb ReloadCallback) {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}

	err = watcher.WatchFlags(w.fileName, fsnotify.FSN_MODIFY)
	if err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-watcher.Event:
			log.Infof("Config file changed (%s), attempting reload", ev)
			conf, err := LoadFromFile(w.fileName)
			if err != nil {
				log.Error("Error loading new config: ", err)
				failedConfigReloads.Inc()
			} else {
				cb(&conf)
				log.Info("Config reloaded successfully")
				configReloads.Inc()
			}
			// Re-add the file watcher since it can get lost on some changes. E.g.
			// saving a file with vim results in a RENAME-MODIFY-DELETE event
			// sequence, after which the newly written file is no longer watched.
			err = watcher.WatchFlags(w.fileName, fsnotify.FSN_MODIFY)
		case err := <-watcher.Error:
			log.Error("Error watching config: ", err)
		}
	}
}
Code example #14
File: manager.go Project: robbiet480/alertmanager
// Run a single memoryAlertManager iteration.
func (s *memoryAlertManager) runIteration() {
	s.removeExpiredAggregates()
	s.checkNotificationRepeats()
	if refresh, reasons := s.refreshNeeded(); refresh {
		log.Infof("Recomputing notification outputs (%s)", strings.Join(reasons, ", "))
		s.refreshNotifications()
	}
}
Code example #15
File: queue_manager.go Project: bluecmd/prometheus
// Stop stops sending samples to the remote storage and waits for pending
// sends to complete.
func (t *StorageQueueManager) Stop() {
	log.Infof("Stopping remote storage...")
	close(t.queue)
	<-t.drained
	for i := 0; i < maxConcurrentSends; i++ {
		t.sendSemaphore <- true
	}
	log.Info("Remote storage stopped.")
}
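
Filling sendSemaphore with maxConcurrentSends tokens only acts as a wait if every in-flight send holds one slot for its duration. A minimal sketch of the acquire/release side, assuming the sample slice type, with the send body elided as a comment:

// Hypothetical sketch: each send occupies one semaphore slot, so Stop can
// wait for all in-flight sends to finish by claiming every slot.
func (t *StorageQueueManager) sendSamples(s clientmodel.Samples) {
	t.sendSemaphore <- true
	defer func() { <-t.sendSemaphore }()

	// ... encode the samples and POST them to the remote storage ...
}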
Code example #16
File: persistence.go Project: bluecmd/prometheus
// checkpointFPMappings persists the fingerprint mappings. This method is not
// goroutine-safe.
//
// Description of the file format, v1:
//
// (1) Magic string (const mappingsMagicString).
//
// (2) Uvarint-encoded format version (const mappingsFormatVersion).
//
// (3) Uvarint-encoded number of mappings in fpMappings.
//
// (4) Repeated once per mapping:
//
// (4.1) The raw fingerprint as big-endian uint64.
//
// (4.2) The uvarint-encoded number of sub-mappings for the raw fingerprint.
//
// (4.3) Repeated once per sub-mapping:
//
// (4.3.1) The uvarint-encoded length of the unique metric string.
// (4.3.2) The unique metric string.
// (4.3.3) The mapped fingerprint as big-endian uint64.
func (p *persistence) checkpointFPMappings(fpm fpMappings) (err error) {
	log.Info("Checkpointing fingerprint mappings...")
	begin := time.Now()
	f, err := os.OpenFile(p.mappingsTempFileName(), os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
	if err != nil {
		return
	}

	defer func() {
		f.Sync()
		closeErr := f.Close()
		if err != nil {
			return
		}
		err = closeErr
		if err != nil {
			return
		}
		err = os.Rename(p.mappingsTempFileName(), p.mappingsFileName())
		duration := time.Since(begin)
		log.Infof("Done checkpointing fingerprint mappings in %v.", duration)
	}()

	w := bufio.NewWriterSize(f, fileBufSize)

	if _, err = w.WriteString(mappingsMagicString); err != nil {
		return
	}
	if _, err = codable.EncodeUvarint(w, mappingsFormatVersion); err != nil {
		return
	}
	if _, err = codable.EncodeUvarint(w, uint64(len(fpm))); err != nil {
		return
	}

	for fp, mappings := range fpm {
		if err = codable.EncodeUint64(w, uint64(fp)); err != nil {
			return
		}
		if _, err = codable.EncodeUvarint(w, uint64(len(mappings))); err != nil {
			return
		}
		for ms, mappedFP := range mappings {
			if _, err = codable.EncodeUvarint(w, uint64(len(ms))); err != nil {
				return
			}
			if _, err = w.WriteString(ms); err != nil {
				return
			}
			if err = codable.EncodeUint64(w, uint64(mappedFP)); err != nil {
				return
			}
		}
	}
	err = w.Flush()
	return
}
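
The format description above is detailed enough to write the reading side. A hedged, standalone sketch of a decoder using only the standard library; it assumes it lives in the same package (so the mappingsMagicString and mappingsFormatVersion constants are visible), while the real code presumably goes through the codable helpers instead:

// Hypothetical reader for the v1 mappings checkpoint format described above.
func readFPMappings(r io.Reader) (map[uint64]map[string]uint64, error) {
	br := bufio.NewReader(r)

	// (1) Magic string.
	magic := make([]byte, len(mappingsMagicString))
	if _, err := io.ReadFull(br, magic); err != nil {
		return nil, err
	}
	if string(magic) != mappingsMagicString {
		return nil, fmt.Errorf("unexpected magic string %q", magic)
	}
	// (2) Format version.
	if v, err := binary.ReadUvarint(br); err != nil {
		return nil, err
	} else if v != uint64(mappingsFormatVersion) {
		return nil, fmt.Errorf("unsupported format version %d", v)
	}
	// (3) Number of mappings.
	n, err := binary.ReadUvarint(br)
	if err != nil {
		return nil, err
	}
	result := make(map[uint64]map[string]uint64, n)
	for i := uint64(0); i < n; i++ {
		// (4.1) Raw fingerprint as big-endian uint64.
		var fp uint64
		if err := binary.Read(br, binary.BigEndian, &fp); err != nil {
			return nil, err
		}
		// (4.2) Number of sub-mappings.
		numSub, err := binary.ReadUvarint(br)
		if err != nil {
			return nil, err
		}
		subs := make(map[string]uint64, numSub)
		for j := uint64(0); j < numSub; j++ {
			// (4.3.1) and (4.3.2) Length-prefixed unique metric string.
			l, err := binary.ReadUvarint(br)
			if err != nil {
				return nil, err
			}
			ms := make([]byte, l)
			if _, err := io.ReadFull(br, ms); err != nil {
				return nil, err
			}
			// (4.3.3) Mapped fingerprint as big-endian uint64.
			var mappedFP uint64
			if err := binary.Read(br, binary.BigEndian, &mappedFP); err != nil {
				return nil, err
			}
			subs[string(ms)] = mappedFP
		}
		result[fp] = subs
	}
	return result, nil
}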
Code example #17
func main() {
	flag.Parse()

	// Initialize Intel Edison
	edisonAdaptor := edison.NewEdisonAdaptor("edison")
	edisonAdaptor.Connect()

	lightSensor := gpio.NewGroveLightSensorDriver(edisonAdaptor, "light", *sensorLightPin, *sensorPollingInterval)
	lightSensor.Start()
	gobot.On(lightSensor.Event("data"), func(data interface{}) {
		raw := float64(data.(int))
		// convert to lux
		resistance := (1023.0 - raw) * 10.0 / raw * 15.0
		light = 10000.0 / math.Pow(resistance, 4.0/3.0)
		lightUpdated = time.Now()
		log.Debugln("illuminance: ", light)
	})

	soundSensor := gpio.NewGroveSoundSensorDriver(edisonAdaptor, "sound", *sensorSoundPin, *sensorPollingInterval)
	soundSensor.Start()
	gobot.On(soundSensor.Event("data"), func(data interface{}) {
		sound = float64(data.(int))
		soundUpdated = time.Now()
		log.Debugln("sound level: ", sound)
	})

	tempSensor := gpio.NewGroveTemperatureSensorDriver(edisonAdaptor, "temp", *sensorTempPin, *sensorPollingInterval)
	tempSensor.Start()
	gobot.On(tempSensor.Event("data"), func(data interface{}) {
		celsius = data.(float64)
		fahrenheit = celsius*1.8 + 32
		tempUpdated = time.Now()
		log.Debugln("temperature: ", celsius)
	})

	// Initialize prometheus exporter
	exporter := NewExporter()
	prometheus.MustRegister(exporter)

	log.Infof("Listening on: %s", *listenAddress)
	http.Handle(*metricPath, prometheus.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`
			<html>
			<head>
				<title>IoT Edison exporter</title>
			</head>
			<body>
				<h1>Prometheus exporter for sensor metrics from Intel Edison</h1>
				<p><a href='` + *metricPath + `'>Metrics</a></p>
			</body>
			</html>
		`))
	})
	log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
Code example #18
File: notifier.go Project: tamsky/alertmanager
func (n *notifier) sendHipChatNotification(op notificationOp, config *pb.HipChatConfig, a *Alert) error {
	// https://www.hipchat.com/docs/apiv2/method/send_room_notification
	incidentKey := a.Fingerprint()
	color := ""
	status := ""
	message := ""
	messageFormat := ""
	switch op {
	case notificationOpTrigger:
		color = config.GetColor()
		status = "firing"
	case notificationOpResolve:
		color = config.GetColorResolved()
		status = "resolved"
	}
	if config.GetMessageFormat() == pb.HipChatConfig_TEXT {
		message = fmt.Sprintf("%s%s %s: %s", config.GetPrefix(), a.Labels["alertname"], status, a.Summary)
		messageFormat = "text"
	} else {
		message = fmt.Sprintf("%s<b>%s %s</b>: %s (<a href='%s'>view</a>)", config.GetPrefix(), html.EscapeString(a.Labels["alertname"]), status, html.EscapeString(a.Summary), a.Payload["generatorURL"])
		messageFormat = "html"
	}
	buf, err := json.Marshal(map[string]interface{}{
		"color":          color,
		"message":        message,
		"notify":         config.GetNotify(),
		"message_format": messageFormat,
	})
	if err != nil {
		return err
	}

	timeout := time.Duration(5 * time.Second)
	client := http.Client{
		Timeout: timeout,
	}
	resp, err := client.Post(
		fmt.Sprintf("%s/room/%d/notification?auth_token=%s", *hipchatURL, config.GetRoomId(), config.GetAuthToken()),
		contentTypeJSON,
		bytes.NewBuffer(buf),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	respBuf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	log.Infof("Sent HipChat notification: %v: HTTP %d: %s", incidentKey, resp.StatusCode, respBuf)
	// BUG: Check response for result of operation.
	return nil
}
Code example #19
File: mapper.go Project: bluecmd/prometheus
// maybeAddMapping is only used internally. It takes a detected collision and
// adds it to the collisions map if not yet there. In any case, it returns the
// truly unique fingerprint for the colliding metric.
func (m *fpMapper) maybeAddMapping(
	fp clientmodel.Fingerprint,
	collidingMetric clientmodel.Metric,
) (clientmodel.Fingerprint, error) {
	ms := metricToUniqueString(collidingMetric)
	m.mtx.RLock()
	mappedFPs, ok := m.mappings[fp]
	m.mtx.RUnlock()
	if ok {
		// fp is locked by the caller, so no further locking required.
		mappedFP, ok := mappedFPs[ms]
		if ok {
			return mappedFP, nil // Existing mapping.
		}
		// A new mapping has to be created.
		mappedFP = m.nextMappedFP()
		mappedFPs[ms] = mappedFP
		m.mtx.Lock()
		// Checkpoint mappings after each change.
		err := m.p.checkpointFPMappings(m.mappings)
		m.mtx.Unlock()
		log.Infof(
			"Collision detected for fingerprint %v, metric %v, mapping to new fingerprint %v.",
			fp, collidingMetric, mappedFP,
		)
		return mappedFP, err
	}
	// This is the first collision for fp.
	mappedFP := m.nextMappedFP()
	mappedFPs = map[string]clientmodel.Fingerprint{ms: mappedFP}
	m.mtx.Lock()
	m.mappings[fp] = mappedFPs
	m.mappingsCounter.Inc()
	// Checkpoint mappings after each change.
	err := m.p.checkpointFPMappings(m.mappings)
	m.mtx.Unlock()
	log.Infof(
		"Collision detected for fingerprint %v, metric %v, mapping to new fingerprint %v.",
		fp, collidingMetric, mappedFP,
	)
	return mappedFP, err
}
Code example #20
File: main.go Project: rhuss/jenkins_exporter
func main() {
	flag.Parse()

	c := newJenkinsMetricsCollector()
	prometheus.MustRegister(c)

	http.Handle(*metricsPath, prometheus.Handler())

	log.Infof("Starting Server: %s", *webAddress)
	http.ListenAndServe(*webAddress, nil)
}
Code example #21
File: textfile.go Project: fin09pcap/node_exporter
// NewTextFileCollector returns a new textFileCollector and registers a
// SetMetricFamilyInjectionHook.
func NewTextFileCollector() (Collector, error) {
	if *textFileDirectory == "" {
		// This collector is enabled by default, so do not fail if
		// the flag is not passed.
		log.Infof("No directory specified, see --textfile.directory")
	} else {
		prometheus.SetMetricFamilyInjectionHook(parseTextFiles)
	}

	return &textFileCollector{}, nil
}
Code example #22
File: main.go Project: RichiH/graphite_exporter
func (c *graphiteCollector) processLine(line string) {
	parts := strings.Split(line, " ")
	if len(parts) != 3 {
		log.Infof("Invalid part count of %d in line: %s", len(parts), line)
		return
	}
	var name string
	labels, present := c.mapper.getMapping(parts[0])
	if present {
		name = labels["name"]
		delete(labels, "name")
	} else {
		// If the graphite.mapping-strict-match flag is set, drop this metric.
		if *strictMatch {
			return
		}
		name = invalidMetricChars.ReplaceAllString(parts[0], "_")
	}

	value, err := strconv.ParseFloat(parts[1], 64)
	if err != nil {
		log.Infof("Invalid value in line: %s", line)
		return
	}
	timestamp, err := strconv.ParseFloat(parts[2], 64)
	if err != nil {
		log.Infof("Invalid timestamp in line: %s", line)
		return
	}
	sample := graphiteSample{
		OriginalName: parts[0],
		Name:         name,
		Value:        value,
		Labels:       labels,
		Type:         prometheus.GaugeValue,
		Help:         fmt.Sprintf("Graphite metric %s", parts[0]),
		Timestamp:    time.Unix(int64(timestamp), int64(math.Mod(timestamp, 1.0)*1e9)),
	}
	lastProcessed.Set(float64(time.Now().UnixNano()) / 1e9)
	c.ch <- &sample
}
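
processLine expects lines in the Graphite plaintext protocol: three space-separated fields, i.e. metric path, value, and Unix timestamp. A tiny standalone sketch of the same parsing, with a made-up metric name:

// Hypothetical illustration of the expected input format.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "web01.cpu.load 0.42 1458732000" // <path> <value> <timestamp>
	parts := strings.Split(line, " ")
	if len(parts) != 3 {
		fmt.Println("invalid line")
		return
	}
	value, _ := strconv.ParseFloat(parts[1], 64)
	ts, _ := strconv.ParseFloat(parts[2], 64)
	fmt.Printf("metric=%s value=%g timestamp=%d\n", parts[0], value, int64(ts))
}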
Code example #23
File: web.go Project: rajthilakmca/prometheus
// Run serves the HTTP endpoints.
func (h *Handler) Run() {
	log.Infof("Listening on %s", h.options.ListenAddress)

	// If we cannot bind to a port, retry after 30 seconds.
	for {
		err := http.ListenAndServe(h.options.ListenAddress, h.router)
		if err != nil {
			log.Errorf("Could not listen on %s: %s", h.options.ListenAddress, err)
		}
		time.Sleep(30 * time.Second)
	}
}
Code example #24
File: notifier.go Project: tamsky/alertmanager
func processResponse(r *http.Response, targetName string, a *Alert) error {
	spec := fmt.Sprintf("%s notification for alert %v", targetName, a.Fingerprint())
	if r == nil {
		return fmt.Errorf("No HTTP response for %s", spec)
	}
	defer r.Body.Close()
	respBuf, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return err
	}
	log.Infof("Sent %s. Response: HTTP %d: %s", spec, r.StatusCode, respBuf)
	return nil
}
Code example #25
File: main.go Project: caskey/blackbox_exporter
func main() {
	flag.Parse()

	yamlFile, err := ioutil.ReadFile(*configFile)

	if err != nil {
		log.Fatalf("Error reading config file: %s", err)
	}

	config := Config{}

	err = yaml.Unmarshal(yamlFile, &config)
	if err != nil {
		log.Fatalf("Error parsing config file: %s", err)
	}
	log.Infof("Configuration loaded from: %s", *configFile)

	http.Handle("/metrics", prometheus.Handler())
	http.HandleFunc("/probe",
		func(w http.ResponseWriter, r *http.Request) {
			probeHandler(w, r, &config)
		})
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
            <head><title>Blackbox Exporter</title></head>
            <body>
            <h1>Blackbox Exporter</h1>
            <p><a href="/probe?target=prometheus.io&module=http_2xx">Probe prometheus.io for http_2xx</a></p>
            <p><a href="/metrics">Metrics</a></p>
            </body>
            </html>`))
	})
	log.Infof("Listening for connections on %s", *addr)
	if err := http.ListenAndServe(*addr, nil); err != nil {
		log.Fatalf("Error starting HTTP server: %s", err)
	}
}
Code example #26
// Start implements Storage.
func (s *memorySeriesStorage) Start() (err error) {
	var syncStrategy syncStrategy
	switch s.options.SyncStrategy {
	case Never:
		syncStrategy = func() bool { return false }
	case Always:
		syncStrategy = func() bool { return true }
	case Adaptive:
		syncStrategy = func() bool { return !s.isDegraded() }
	default:
		panic("unknown sync strategy")
	}

	var p *persistence
	p, err = newPersistence(s.options.PersistenceStoragePath, s.options.Dirty, s.options.PedanticChecks, syncStrategy)
	if err != nil {
		return err
	}
	s.persistence = p
	// Persistence must start running before loadSeriesMapAndHeads() is called.
	go s.persistence.run()

	defer func() {
		if err != nil {
			if e := p.close(); e != nil {
				log.Errorln("Error closing persistence:", e)
			}
		}
	}()

	log.Info("Loading series map and head chunks...")
	s.fpToSeries, s.numChunksToPersist, err = p.loadSeriesMapAndHeads()
	if err != nil {
		return err
	}
	log.Infof("%d series loaded.", s.fpToSeries.length())
	s.numSeries.Set(float64(s.fpToSeries.length()))

	s.mapper, err = newFPMapper(s.fpToSeries, p)
	if err != nil {
		return err
	}

	go s.handleEvictList()
	go s.loop()

	return nil
}
Code example #27
File: main.go Project: gitter-badger/prometheus
func reloadConfig(filename string, rls ...Reloadable) bool {
	log.Infof("Loading configuration file %s", filename)

	conf, err := config.LoadFromFile(filename)
	if err != nil {
		log.Errorf("Couldn't load configuration (-config.file=%s): %v", filename, err)
		log.Errorf("Note: The configuration format has changed with version 0.14. Please see the documentation (http://prometheus.io/docs/operating/configuration/) and the provided configuration migration tool (https://github.com/prometheus/migrate).")
		return false
	}
	success := true

	for _, rl := range rls {
		success = success && rl.ApplyConfig(conf)
	}
	return success
}
Code example #28
File: notifier.go Project: NuclearDog/alertmanager
func (n *notifier) sendPagerDutyNotification(serviceKey string, op notificationOp, a *Alert) error {
	// http://developer.pagerduty.com/documentation/integration/events/trigger
	eventType := ""
	switch op {
	case notificationOpTrigger:
		eventType = "trigger"
	case notificationOpResolve:
		eventType = "resolve"
	}
	incidentKey := a.Fingerprint()
	buf, err := json.Marshal(map[string]interface{}{
		"service_key":  serviceKey,
		"event_type":   eventType,
		"description":  a.Description,
		"incident_key": incidentKey,
		"client":       "Prometheus Alertmanager",
		"client_url":   n.alertmanagerURL,
		"details": map[string]interface{}{
			"grouping_labels": a.Labels,
			"extra_labels":    a.Payload,
			"runbook":         a.Runbook,
			"summary":         a.Summary,
		},
	})
	if err != nil {
		return err
	}

	resp, err := http.Post(
		*pagerdutyAPIURL,
		contentTypeJSON,
		bytes.NewBuffer(buf),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	respBuf, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	log.Infof("Sent PagerDuty notification: %v: HTTP %d: %s", incidentKey, resp.StatusCode, respBuf)
	// BUG: Check response for result of operation.
	return nil
}
Code example #29
File: main.go Project: lig/collectd_exporter
func main() {
	flag.Parse()

	c := newCollectdCollector()
	prometheus.MustRegister(c)

	startCollectdServer(c)

	if *collectdPostPath != "" {
		http.HandleFunc(*collectdPostPath, c.collectdPost)
	}

	http.Handle(*metricsPath, prometheus.Handler())

	log.Infof("Starting Server: %s", *webAddress)
	http.ListenAndServe(*webAddress, nil)
}
Code example #30
func main() {
	flag.Parse()

	handler := prometheus.Handler()
	prometheus.MustRegister(percentage)
	prometheus.MustRegister(temperature)
	prometheus.MustRegister(timestamp)
	prometheus.MustRegister(capacity)

	http.Handle(*metricsPath, handler)
	go startPolling()

	log.Infof("Starting tank_utility_exporter v%s at %s", Version, *listenAddress)
	err := http.ListenAndServe(*listenAddress, nil)
	if err != nil {
		log.Fatal(err)
	}
}