Example 1
func (e *periodicExporter) scrapeSlaves() {
	e.slaves.Lock()
	urls := make([]string, len(e.slaves.urls))
	copy(urls, e.slaves.urls)
	e.slaves.Unlock()

	urlCount := len(urls)
	log.Debugf("active slaves: %d", urlCount)

	urlChan := make(chan string)
	metricsChan := make(chan prometheus.Metric)
	go e.setMetrics(metricsChan)

	poolSize := concurrentFetch
	if urlCount < concurrentFetch {
		poolSize = urlCount
	}

	log.Debugf("creating fetch pool of size %d", poolSize)

	var wg sync.WaitGroup
	wg.Add(poolSize)
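	// Each worker consumes URLs from urlChan and sends the scraped metrics to metricsChan.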
	for i := 0; i < poolSize; i++ {
		go e.fetch(urlChan, metricsChan, &wg)
	}

	for _, url := range urls {
		urlChan <- url
	}
	close(urlChan)

	wg.Wait()
	close(metricsChan)
}
Example 2
func (c *runitCollector) Update(ch chan<- prometheus.Metric) error {
	services, err := runit.GetServices("/etc/service")
	if err != nil {
		return err
	}

	for _, service := range services {
		status, err := service.Status()
		if err != nil {
			log.Debugf("Couldn't get status for %s: %s, skipping...", service.Name, err)
			continue
		}

		log.Debugf("%s is %d on pid %d for %d seconds", service.Name, status.State, status.Pid, status.Duration)
		c.state.WithLabelValues(service.Name).Set(float64(status.State))
		c.stateDesired.WithLabelValues(service.Name).Set(float64(status.Want))
		c.stateTimestamp.WithLabelValues(service.Name).Set(float64(status.Timestamp.Unix()))
		if status.NormallyUp {
			c.stateNormal.WithLabelValues(service.Name).Set(1)
		} else {
			c.stateNormal.WithLabelValues(service.Name).Set(0)
		}
	}
	c.state.Collect(ch)
	c.stateDesired.Collect(ch)
	c.stateNormal.Collect(ch)
	c.stateTimestamp.Collect(ch)

	return nil
}
Example 3
func (c *gmondCollector) setMetric(name, cluster string, metric ganglia.Metric) {
	if _, ok := c.metrics[name]; !ok {
		var desc string
		var title string
		for _, element := range metric.ExtraData.ExtraElements {
			switch element.Name {
			case "DESC":
				desc = element.Val
			case "TITLE":
				title = element.Val
			}
			if title != "" && desc != "" {
				break
			}
		}
		log.Debugf("Register %s: %s", name, desc)
		c.metrics[name] = prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: gangliaNamespace,
				Name:      name,
				Help:      desc,
			},
			[]string{"cluster"},
		)
	}
	log.Debugf("Set %s{cluster=%q}: %f", name, cluster, metric.Value)
	c.metrics[name].WithLabelValues(cluster).Set(metric.Value)
}
Example 4
// Expose filesystem fullness.
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
	mpds, err := mountPointDetails()
	if err != nil {
		return nil, err
	}
	stats = []filesystemStats{}
	for _, mpd := range mpds {
		if c.ignoredMountPointsPattern.MatchString(mpd.mountPoint) {
			log.Debugf("Ignoring mount point: %s", mpd.mountPoint)
			continue
		}
		buf := new(syscall.Statfs_t)
		err := syscall.Statfs(mpd.mountPoint, buf)
		if err != nil {
			log.Debugf("Statfs on %s returned %s",
				mpd.mountPoint, err)
			continue
		}

		labelValues := []string{mpd.device, mpd.mountPoint, mpd.fsType}
		stats = append(stats, filesystemStats{
			labelValues: labelValues,
			size:        float64(buf.Blocks) * float64(buf.Bsize),
			free:        float64(buf.Bfree) * float64(buf.Bsize),
			avail:       float64(buf.Bavail) * float64(buf.Bsize),
			files:       float64(buf.Files),
			filesFree:   float64(buf.Ffree),
		})
	}
	return stats, nil
}
Example 5
// StopScraper implements Target.
func (t *Target) StopScraper() {
	log.Debugf("Stopping scraper for target %v...", t)

	close(t.scraperStopping)
	<-t.scraperStopped

	log.Debugf("Scraper for target %v stopped.", t)
}
Example 6
// Stop implements the TargetProvider interface.
func (sd *ServersetDiscovery) Stop() {
	log.Debugf("Stopping serverset service discovery for %s %s", sd.conf.Servers, sd.conf.Paths)

	// Terminate Run.
	sd.runDone <- struct{}{}

	log.Debugf("Serverset service discovery for %s %s stopped", sd.conf.Servers, sd.conf.Paths)
}
Example 7
func probeTCP(target string, w http.ResponseWriter, module Module) bool {
	deadline := time.Now().Add(module.Timeout)
	conn, err := net.DialTimeout("tcp", target, module.Timeout)
	if err != nil {
		return false
	}
	defer conn.Close()
	// Set a deadline to prevent the following code from blocking forever.
	// If a deadline cannot be set, better fail the probe by returning an error
	// now rather than blocking forever.
	if err := conn.SetDeadline(deadline); err != nil {
		return false
	}
	scanner := bufio.NewScanner(conn)
	for _, qr := range module.TCP.QueryResponse {
		log.Debugf("Processing query response entry %+v", qr)
		send := qr.Send
		if qr.Expect != "" {
			re, err := regexp.Compile(qr.Expect)
			if err != nil {
				log.Errorf("Could not compile %q into regular expression: %v", qr.Expect, err)
				return false
			}
			var match []int
			// Read lines until one of them matches the configured regexp.
			for scanner.Scan() {
				log.Debugf("read %q\n", scanner.Text())
				match = re.FindSubmatchIndex(scanner.Bytes())
				if match != nil {
					log.Debugf("regexp %q matched %q", re, scanner.Text())
					break
				}
			}
			if scanner.Err() != nil {
				return false
			}
			if match == nil {
				return false
			}
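			// Substitute $1-style references in the send template with the submatches captured from the matched line.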
			send = string(re.Expand(nil, []byte(send), scanner.Bytes(), match))
		}
		if send != "" {
			log.Debugf("Sending %q", send)
			if _, err := fmt.Fprintf(conn, "%s\n", send); err != nil {
				return false
			}
		}
	}
	return true
}
Example 8
func (e *exporter) scrape(ch chan<- prometheus.Metric) {
	defer close(ch)

	servers := []string{}

	if e.useExhibitor {
		url := fmt.Sprintf("http://%s/exhibitor/v1/cluster/list", e.addrs[0])
		rr, err := http.NewRequest("GET", url, nil)
		if err != nil {
			panic(err)
		}

		rresp, err := httpClient.Transport.RoundTrip(rr)
		if err != nil {
			e.recordErr(err)
			return
		}
		defer rresp.Body.Close()

		body, err := ioutil.ReadAll(rresp.Body)
		if err != nil {
			e.recordErr(err)
			return
		}

		var serverList Servers
		err = json.Unmarshal(body, &serverList)
		if err != nil {
			e.recordErr(err)
			return
		}

		log.Debugf("Got serverlist from Exhibitor: %s", serverList)

		for _, host := range serverList.Servers {
			servers = append(servers, fmt.Sprintf("%s:%d", host, serverList.Port))
		}
	} else {
		servers = e.addrs
	}

	log.Debugf("Polling servers: %s", servers)
	var wg sync.WaitGroup
	for _, server := range servers {
		log.Debugf("Polling server: %s", server)
		wg.Add(1)
		go e.pollServer(server, ch, &wg)
	}
	wg.Wait()
}
Example 9
func ParseReceiver(s string) (receiver string, err error) {
	var si interface{}

	rdr := bytes.NewReader([]byte(s))
	v, err := ReadRESP(rdr)
	if err != nil {
		return _EMPTY_, err
	}
	log.Debugf("[ParseReceiver] %v: %q", reflect.TypeOf(v), v)

	switch t := v.(type) {
	case []interface{}:
		si = t[receiverPos]
	default:
		return _EMPTY_, errors.New("No receiver found.")
	}

	// Check concrete types directly; a catch-all interface{} case would
	// shadow them and panic on the string assertion for other types.
	switch t := si.(type) {
	case string:
		receiver = t
	case int:
		receiver = strconv.Itoa(t)
	}
	if receiver == _EMPTY_ {
		return receiver, errors.New("No receiver found.")
	}
	return
}
Example 10
// Expose filesystem fullness.
func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) (err error) {
	var mntbuf *C.struct_statfs
	count := C.getmntinfo(&mntbuf, C.MNT_NOWAIT)
	if count == 0 {
		return errors.New("getmntinfo() failed")
	}

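	// Reinterpret the C buffer as a large fixed-size array; only the first count entries are valid.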
	mnt := (*[1 << 30]C.struct_statfs)(unsafe.Pointer(mntbuf))
	for i := 0; i < int(count); i++ {
		name := C.GoString(&mnt[i].f_mntonname[0])
		if c.ignoredMountPointsPattern.MatchString(name) {
			log.Debugf("Ignoring mount point: %s", name)
			continue
		}
		c.size.WithLabelValues(name).Set(float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize))
		c.free.WithLabelValues(name).Set(float64(mnt[i].f_bfree) * float64(mnt[i].f_bsize))
		c.avail.WithLabelValues(name).Set(float64(mnt[i].f_bavail) * float64(mnt[i].f_bsize))
		c.files.WithLabelValues(name).Set(float64(mnt[i].f_files))
		c.filesFree.WithLabelValues(name).Set(float64(mnt[i].f_ffree))
	}

	c.size.Collect(ch)
	c.free.Collect(ch)
	c.avail.Collect(ch)
	c.files.Collect(ch)
	c.filesFree.Collect(ch)
	return err
}
Example 11
// Store sends a batch of samples to InfluxDB via its HTTP API.
func (c *Client) Store(samples model.Samples) error {
	points := make([]influx.Point, 0, len(samples))
	for _, s := range samples {
		v := float64(s.Value)
		if math.IsNaN(v) || math.IsInf(v, 0) {
			log.Debugf("cannot send value %f to InfluxDB, skipping sample %#v", v, s)
			c.ignoredSamples.Inc()
			continue
		}
		points = append(points, influx.Point{
			Measurement: string(s.Metric[model.MetricNameLabel]),
			Tags:        tagsFromMetric(s.Metric),
			Time:        s.Timestamp.Time(),
			Precision:   "ms",
			Fields: map[string]interface{}{
				"value": v,
			},
		})
	}

	bps := influx.BatchPoints{
		Points:          points,
		Database:        c.database,
		RetentionPolicy: c.retentionPolicy,
	}
	_, err := c.client.Write(bps)
	return err
}
Example 12
func (tc *zookeeperTreeCache) loop(failureMode bool) {
	retryChan := make(chan struct{})

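	// failure switches the cache into failure mode and schedules a resync attempt after ten seconds.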
	failure := func() {
		failureMode = true
		time.AfterFunc(time.Second*10, func() {
			retryChan <- struct{}{}
		})
	}
	if failureMode {
		failure()
	}

	for {
		select {
		case ev := <-tc.head.events:
			log.Debugf("Received Zookeeper event: %s", ev)
			if failureMode {
				continue
			}
			if ev.Type == zk.EventNotWatching {
				log.Infof("Lost connection to Zookeeper.")
				failure()
			} else {
				path := strings.TrimPrefix(ev.Path, tc.prefix)
				parts := strings.Split(path, "/")
				node := tc.head
				for _, part := range parts[1:] {
					childNode := node.children[part]
					if childNode == nil {
						childNode = &zookeeperTreeCacheNode{
							events:   tc.head.events,
							children: map[string]*zookeeperTreeCacheNode{},
							done:     make(chan struct{}, 1),
						}
						node.children[part] = childNode
					}
					node = childNode
				}
				err := tc.recursiveNodeUpdate(ev.Path, node)
				if err != nil {
					log.Errorf("Error during processing of Zookeeper event: %s", err)
					failure()
				}
			}
		case <-retryChan:
			log.Infof("Attempting to resync state with Zookeeper")
			err := tc.recursiveNodeUpdate(tc.prefix, tc.head)
			if err == nil {
				failureMode = false
			} else {
				log.Errorf("Error during Zookeeper resync: %s", err)
				failure()
			}
		case <-tc.stop:
			close(tc.events)
			return
		}
	}
}
Example 13
func activateHandler(w http.ResponseWriter, r *http.Request) {
	// TODO(kendall): Count activations
	// TODO(kendall): Return errors or at least clean up the logic
	// TODO(kendall): Flag for ssl?
	// TODO(kendall): Figure out host and port
	// TODO(kendall): Authtoken
	// TODO(kendall): postrate
	// TODO(kendall): highprec
	var activation ted5000ActivationRequest
	var port_suffix string
	var port int
	var err error
	if err := xml.NewDecoder(r.Body).Decode(&activation); err != nil {
		fmt.Fprintf(w, "Could not parse activation XML: %s", err)
	}
	log.Debugf("Activation request: %s", activation)
	if _, port_suffix, err = net.SplitHostPort(*listenAddress); err != nil {
		fmt.Printf("Could not determine port from %s: %s", *listenAddress, err)
	}
	if port, err = strconv.Atoi(port_suffix); err != nil {
		fmt.Printf("Could not convert port %q to int: %s", port_suffix, err)
	}
	if err := xml.NewEncoder(w).Encode(ted5000ActivationResponse{
		PostServer: r.Host,
		UseSSL:     false,
		PostPort:   port,
		PostRate:   *postRate,
		PostURL:    "/post",
		HighPrec:   "T"}); err != nil {
		fmt.Fprintf(w, "Could not create XML activation response: %s", err)
	}
}
Example 14
func ParsePayload(s string) (payload string, err error) {
	var pi interface{}

	rdr := bytes.NewReader([]byte(s))
	v, err := ReadRESP(rdr)
	if err != nil {
		return _EMPTY_, err
	}
	log.Debugf("%v: %q", reflect.TypeOf(v), v)
	switch t := v.(type) {
	case []interface{}:
		pi = t[payloadPos]
	default:
		return _EMPTY_, errors.New("No payload found.")
	}

	switch t := pi.(type) {
	case string:
		payload = t
	case int:
		payload = strconv.Itoa(t)
	}
	if payload == _EMPTY_ {
		return payload, errors.New("No payload found.")
	}
	return
}
Example 15
func ParseEvent(s string) (event string, err error) {
	var ei interface{}

	rdr := bytes.NewReader([]byte(s))
	v, err := ReadRESP(rdr)
	if err != nil {
		return _EMPTY_, err
	}
	log.Debugf("[ParseEvent] %v: %v", reflect.TypeOf(v), v)

	switch t := v.(type) {
	case []interface{}:
		ei = t[eventPos]
	default:
		return _EMPTY_, errors.New("No event found.")
	}

	switch t := ei.(type) {
	case string:
		event = t
	case int:
		event = strconv.Itoa(t)
	}
	if event == _EMPTY_ {
		return event, errors.New("No event found.")
	}
	return
}
Example 16
func (c *timeCollector) Update(ch chan<- prometheus.Metric) (err error) {
	now := float64(time.Now().Unix())
	log.Debugf("Set time: %f", now)
	c.metric.Set(now)
	c.metric.Collect(ch)
	return err
}
Example 17
// Expose filesystem fullness.
func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) (err error) {
	mpds, err := mountPointDetails()
	if err != nil {
		return err
	}
	for _, mpd := range mpds {
		if c.ignoredMountPointsPattern.MatchString(mpd.mountPoint) {
			log.Debugf("Ignoring mount point: %s", mpd.mountPoint)
			continue
		}
		buf := new(syscall.Statfs_t)
		err := syscall.Statfs(mpd.mountPoint, buf)
		if err != nil {
			return fmt.Errorf("Statfs on %s returned %s", mpd.mountPoint, err)
		}

		c.size.WithLabelValues(mpd.device, mpd.mountPoint, mpd.fsType).Set(float64(buf.Blocks) * float64(buf.Bsize))
		c.free.WithLabelValues(mpd.device, mpd.mountPoint, mpd.fsType).Set(float64(buf.Bfree) * float64(buf.Bsize))
		c.avail.WithLabelValues(mpd.device, mpd.mountPoint, mpd.fsType).Set(float64(buf.Bavail) * float64(buf.Bsize))
		c.files.WithLabelValues(mpd.device, mpd.mountPoint, mpd.fsType).Set(float64(buf.Files))
		c.filesFree.WithLabelValues(mpd.device, mpd.mountPoint, mpd.fsType).Set(float64(buf.Ffree))
	}
	c.size.Collect(ch)
	c.free.Collect(ch)
	c.avail.Collect(ch)
	c.files.Collect(ch)
	c.filesFree.Collect(ch)
	return err
}
Example 18
// Expose filesystem fullness.
func (c *filesystemCollector) GetStats() (stats []filesystemStats, err error) {
	var mntbuf *C.struct_statfs
	count := C.getmntinfo(&mntbuf, C.MNT_NOWAIT)
	if count == 0 {
		return nil, errors.New("getmntinfo() failed")
	}

	mnt := (*[1 << 30]C.struct_statfs)(unsafe.Pointer(mntbuf))
	stats = []filesystemStats{}
	for i := 0; i < int(count); i++ {
		name := C.GoString(&mnt[i].f_mntonname[0])
		if c.ignoredMountPointsPattern.MatchString(name) {
			log.Debugf("Ignoring mount point: %s", name)
			continue
		}
		labelValues := []string{name}
		stats = append(stats, filesystemStats{
			labelValues: labelValues,
			size:        float64(mnt[i].f_blocks) * float64(mnt[i].f_bsize),
			free:        float64(mnt[i].f_bfree) * float64(mnt[i].f_bsize),
			avail:       float64(mnt[i].f_bavail) * float64(mnt[i].f_bsize),
			files:       float64(mnt[i].f_files),
			filesFree:   float64(mnt[i].f_ffree),
		})
	}
	return stats, nil
}
Example 19
// Query handles the /api/query endpoint.
func (api *API) Query(w http.ResponseWriter, r *http.Request) {
	setAccessControlHeaders(w)
	w.Header().Set("Content-Type", "application/json")

	params := httputil.GetQueryParams(r)
	expr := params.Get("expr")

	timestamp, err := parseTimestampOrNow(params.Get("timestamp"), api.Now())
	if err != nil {
		httpJSONError(w, fmt.Errorf("invalid query timestamp: %s", err), http.StatusBadRequest)
		return
	}

	query, err := api.QueryEngine.NewInstantQuery(expr, timestamp)
	if err != nil {
		httpJSONError(w, err, http.StatusOK)
		return
	}
	res := query.Exec()
	if res.Err != nil {
		httpJSONError(w, res.Err, http.StatusOK)
		return
	}
	log.Debugf("Instant query: %s\nQuery stats:\n%s\n", expr, query.Stats())

	httputil.RespondJSON(w, res.Value)
}
Example 20
func (c *gmondCollector) Update(ch chan<- prometheus.Metric) (err error) {
	conn, err := net.Dial(gangliaProto, gangliaAddress)
	log.Debugf("gmondCollector Update")
	if err != nil {
		return fmt.Errorf("Can't connect to gmond: %s", err)
	}
	conn.SetDeadline(time.Now().Add(gangliaTimeout))

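	// gmond answers the connection with an XML dump of its whole metric tree, which is decoded below.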
	ganglia := ganglia.Ganglia{}
	decoder := xml.NewDecoder(bufio.NewReader(conn))
	decoder.CharsetReader = toUtf8

	err = decoder.Decode(&ganglia)
	if err != nil {
		return fmt.Errorf("Couldn't parse xml: %s", err)
	}

	for _, cluster := range ganglia.Clusters {
		for _, host := range cluster.Hosts {

			for _, metric := range host.Metrics {
				name := illegalCharsRE.ReplaceAllString(metric.Name, "_")

				c.setMetric(name, cluster.Name, metric)
			}
		}
	}
	for _, m := range c.metrics {
		m.Collect(ch)
	}
	return err
}
Example 21
func parseNetDevStats(r io.Reader, ignore *regexp.Regexp) (map[string]map[string]string, error) {
	scanner := bufio.NewScanner(r)
	scanner.Scan() // Skip the first header line.
	scanner.Scan() // Read the second header line, which names the columns.
	parts := strings.Split(scanner.Text(), "|")
	if len(parts) != 3 { // interface + receive + transmit
		return nil, fmt.Errorf("invalid header line in %s: %s",
			procNetDev, scanner.Text())
	}

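	// Column names are taken from the receive section and reused for the transmit counters below.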
	header := strings.Fields(parts[1])
	netDev := map[string]map[string]string{}
	for scanner.Scan() {
		line := strings.TrimLeft(scanner.Text(), " ")
		parts := procNetDevFieldSep.Split(line, -1)
		if len(parts) != 2*len(header)+1 {
			return nil, fmt.Errorf("invalid line in %s: %s", procNetDev, scanner.Text())
		}

		dev := parts[0]
		if ignore.MatchString(dev) {
			log.Debugf("Ignoring device: %s", dev)
			continue
		}
		netDev[dev] = map[string]string{}
		for i, v := range header {
			netDev[dev]["receive_"+v] = parts[i+1]
			netDev[dev]["transmit_"+v] = parts[i+1+len(header)]
		}
	}
	return netDev, nil
}
Example 22
func (m *MetricNames) get(api newRelicApi, appId int) error {
	log.Debugf("Requesting metric names for application id %d.", appId)
	path := fmt.Sprintf("/v2/applications/%d/metrics.json", appId)

	body, err := api.req(path, "")
	if err != nil {
		log.Print("Error getting metric names: ", err)
		return err
	}

	dec := json.NewDecoder(bytes.NewReader(body))

	for {
		var part MetricNames
		if err = dec.Decode(&part); err == io.EOF {
			break
		} else if err != nil {
			log.Print("Error decoding metric names: ", err)
			return err
		}
		m.Metrics = append(m.Metrics, part.Metrics...)
	}

	return nil
}
Example 23
// QueryRange handles the /api/query_range endpoint.
func (api *API) QueryRange(w http.ResponseWriter, r *http.Request) {
	setAccessControlHeaders(w)
	w.Header().Set("Content-Type", "application/json")

	params := httputil.GetQueryParams(r)
	expr := params.Get("expr")

	duration, err := parseDuration(params.Get("range"))
	if err != nil {
		httpJSONError(w, fmt.Errorf("invalid query range: %s", err), http.StatusBadRequest)
		return
	}

	step, err := parseDuration(params.Get("step"))
	if err != nil {
		httpJSONError(w, fmt.Errorf("invalid query resolution: %s", err), http.StatusBadRequest)
		return
	}

	end, err := parseTimestampOrNow(params.Get("end"), api.Now())
	if err != nil {
		httpJSONError(w, fmt.Errorf("invalid query timestamp: %s", err), http.StatusBadRequest)
		return
	}
	// TODO(julius): Remove this special-case handling a while after PromDash and
	// other API consumers have been changed to no longer set "end=0" for setting
	// the current time as the end time. Instead, the "end" parameter should
	// simply be omitted or set to an empty string for that case.
	if end == 0 {
		end = api.Now()
	}

	// For safety, limit the number of returned points per timeseries.
	// This is sufficient for 60s resolution for a week or 1h resolution for a year.
	if duration/step > 11000 {
		err := errors.New("exceeded maximum resolution of 11,000 points per timeseries. Try decreasing the query resolution (?step=XX)")
		httpJSONError(w, err, http.StatusBadRequest)
		return
	}

	// Align the end timestamp to a step "tick" boundary.
	end = end.Add(-time.Duration(end.UnixNano() % int64(step)))
	start := end.Add(-duration)

	query, err := api.QueryEngine.NewRangeQuery(expr, start, end, step)
	if err != nil {
		httpJSONError(w, err, http.StatusOK)
		return
	}
	matrix, err := query.Exec().Matrix()
	if err != nil {
		httpJSONError(w, err, http.StatusOK)
		return
	}

	log.Debugf("Range query: %s\nQuery stats:\n%s\n", expr, query.Stats())
	httputil.RespondJSON(w, matrix)
}
Example 24
// handleTargetUpdates receives target group updates and handles them in the
// context of the given job config.
func (tm *TargetManager) handleTargetUpdates(cfg *config.ScrapeConfig, ch <-chan *config.TargetGroup) {
	for tg := range ch {
		log.Debugf("Received potential update for target group %q", tg.Source)

		if err := tm.updateTargetGroup(tg, cfg); err != nil {
			log.Errorf("Error updating targets: %s", err)
		}
	}
}
Example 25
func (c *lastLoginCollector) Update(ch chan<- prometheus.Metric) (err error) {
	last, err := getLastLoginTime()
	if err != nil {
		return fmt.Errorf("couldn't get last seen: %s", err)
	}
	log.Debugf("Set node_last_login_time: %f", last)
	c.metric.Set(last)
	c.metric.Collect(ch)
	return err
}
Example 26
// stop shuts down the file watcher.
func (fd *FileDiscovery) stop() {
	log.Debugf("Stopping file discovery for %s...", fd.paths)

	// Closing the watcher will deadlock unless all events and errors are drained.
	go func() {
		for {
			select {
			case <-fd.watcher.Errors:
			case <-fd.watcher.Events:
				// Drain all events and errors.
			default:
				return
			}
		}
	}()
	fd.watcher.Close()

	log.Debugf("File discovery for %s stopped.", fd.paths)
}
Example 27
func (c *loadavgCollector) Update(ch chan<- prometheus.Metric) (err error) {
	load, err := getLoad1()
	if err != nil {
		return fmt.Errorf("Couldn't get load: %s", err)
	}
	log.Debugf("Set node_load: %f", load)
	c.metric.Set(load)
	c.metric.Collect(ch)
	return err
}
Example 28
func (c *ntpCollector) Update(ch chan<- prometheus.Metric) (err error) {
	t, err := ntp.Time(*ntpServer)
	if err != nil {
		return fmt.Errorf("Couldn't get ntp drift: %s", err)
	}
	drift := t.Sub(time.Now())
	log.Debugf("Set ntp_drift_seconds: %f", drift.Seconds())
	c.drift.Set(drift.Seconds())
	c.drift.Collect(ch)
	return err
}
Example 29
func (a *AppList) get(api newRelicApi) error {
	log.Debugf("Requesting application list from %s.", api.server.String())
	body, err := api.req("/v2/applications.json", "")
	if err != nil {
		log.Print("Error getting application list: ", err)
		return err
	}

	err = json.Unmarshal(body, a)
	return err
}
Example 30
// Stop implements the TargetProvider interface.
func (cd *ConsulDiscovery) Stop() {
	log.Debugf("Stopping Consul service discovery for %s", cd.clientConf.Address)

	// The lock prevents Run from terminating while the watchers attempt
	// to send on their channels.
	cd.mu.Lock()
	defer cd.mu.Unlock()

	// The watching goroutines will terminate after their next watch timeout.
	// As this can take long, the channel is buffered and we do not wait.
	for _, srv := range cd.services {
		srv.done <- struct{}{}
	}
	cd.srvsDone <- struct{}{}

	// Terminate Run.
	cd.runDone <- struct{}{}

	log.Debugf("Consul service discovery for %s stopped.", cd.clientConf.Address)
}