// Collect implements prometheus.Collector.
func (s *memorySeriesStorage) Collect(ch chan<- prometheus.Metric) {
    s.persistence.Collect(ch)
    s.mapper.Collect(ch)
    ch <- s.persistErrors
    ch <- prometheus.MustNewConstMetric(
        maxChunksToPersistDesc,
        prometheus.GaugeValue,
        float64(s.maxChunksToPersist),
    )
    ch <- prometheus.MustNewConstMetric(
        numChunksToPersistDesc,
        prometheus.GaugeValue,
        float64(s.getNumChunksToPersist()),
    )
    ch <- s.numSeries
    s.seriesOps.Collect(ch)
    ch <- s.ingestedSamplesCount
    ch <- s.outOfOrderSamplesCount
    ch <- s.nonExistentSeriesMatchesCount
    ch <- prometheus.MustNewConstMetric(
        numMemChunksDesc,
        prometheus.GaugeValue,
        float64(atomic.LoadInt64(&numMemChunks)),
    )
    s.maintainSeriesDuration.Collect(ch)
    ch <- s.persistenceUrgencyScore
    ch <- s.rushedMode
}
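// The gauge descriptors referenced above (maxChunksToPersistDesc and friends)
// are defined elsewhere in the storage package. A minimal sketch of how such
// descriptors are typically declared with prometheus.NewDesc; the metric
// names and help strings below are assumptions for illustration, not the
// exact definitions from the original file:
var (
    maxChunksToPersistDesc = prometheus.NewDesc(
        prometheus.BuildFQName("prometheus", "local_storage", "max_chunks_to_persist"),
        "The maximum number of chunks that can be waiting for persistence.",
        nil, nil,
    )
    numChunksToPersistDesc = prometheus.NewDesc(
        prometheus.BuildFQName("prometheus", "local_storage", "chunks_to_persist"),
        "The current number of chunks waiting for persistence.",
        nil, nil,
    )
    numMemChunksDesc = prometheus.NewDesc(
        prometheus.BuildFQName("prometheus", "local_storage", "memory_chunks"),
        "The current number of chunks held in memory.",
        nil, nil,
    )
)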
// Collect implements prometheus.Collector.
func (c *viewCollector) Collect(ch chan<- prometheus.Metric) {
    for _, v := range c.stats.Views {
        for _, s := range v.Cache {
            ch <- prometheus.MustNewConstMetric(
                resolverCache, prometheus.GaugeValue, float64(s.Gauge), v.Name, s.Name,
            )
        }
        for _, s := range v.ResolverQueries {
            ch <- prometheus.MustNewConstMetric(
                resolverQueries, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
            )
        }
        for _, s := range v.ResolverStats {
            if desc, ok := resolverMetricStats[s.Name]; ok {
                ch <- prometheus.MustNewConstMetric(
                    desc, prometheus.CounterValue, float64(s.Counter), v.Name,
                )
            }
            if desc, ok := resolverLabelStats[s.Name]; ok {
                ch <- prometheus.MustNewConstMetric(
                    desc, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
                )
            }
        }
        if buckets, count, err := histogram(v.ResolverStats); err == nil {
            ch <- prometheus.MustNewConstHistogram(
                resolverQueryDuration, count, math.NaN(), buckets, v.Name,
            )
        } else {
            log.Warn("Error parsing RTT:", err)
        }
    }
}
// ScrapePerfEventsWaits collects from `performance_schema.events_waits_summary_global_by_event_name`.
func ScrapePerfEventsWaits(db *sql.DB, ch chan<- prometheus.Metric) error {
    // Timers here are returned in picoseconds.
    perfSchemaEventsWaitsRows, err := db.Query(perfEventsWaitsQuery)
    if err != nil {
        return err
    }
    defer perfSchemaEventsWaitsRows.Close()

    var (
        eventName   string
        count, time uint64
    )

    for perfSchemaEventsWaitsRows.Next() {
        if err := perfSchemaEventsWaitsRows.Scan(
            &eventName, &count, &time,
        ); err != nil {
            return err
        }
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaEventsWaitsDesc, prometheus.CounterValue, float64(count),
            eventName,
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaEventsWaitsTimeDesc, prometheus.CounterValue, float64(time)/picoSeconds,
            eventName,
        )
    }
    return nil
}
func (self *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) {
    runningContainers, err := self.containerCache.RunningContainers()
    if err != nil {
        // Warningf is required here because the message uses a %v format verb.
        glog.Warningf("Failed to get running container information while collecting metrics: %v", err)
        return
    }

    // Get a set of running pods.
    runningPods := make(map[types.UID]struct{})
    for _, cont := range runningContainers {
        _, uid, _, _, err := dockertools.ParseDockerName(cont.Names[0])
        if err != nil {
            continue
        }
        runningPods[uid] = struct{}{}
    }

    ch <- prometheus.MustNewConstMetric(
        runningPodCountDesc,
        prometheus.GaugeValue,
        float64(len(runningPods)))
    ch <- prometheus.MustNewConstMetric(
        runningContainerCountDesc,
        prometheus.GaugeValue,
        float64(len(runningContainers)))
}
func (c *filesystemCollector) Update(ch chan<- prometheus.Metric) (err error) {
    stats, err := c.GetStats()
    if err != nil {
        return err
    }
    for _, s := range stats {
        ch <- prometheus.MustNewConstMetric(
            c.sizeDesc, prometheus.GaugeValue,
            s.size, s.labelValues...,
        )
        ch <- prometheus.MustNewConstMetric(
            c.freeDesc, prometheus.GaugeValue,
            s.free, s.labelValues...,
        )
        ch <- prometheus.MustNewConstMetric(
            c.availDesc, prometheus.GaugeValue,
            s.avail, s.labelValues...,
        )
        ch <- prometheus.MustNewConstMetric(
            c.filesDesc, prometheus.GaugeValue,
            s.files, s.labelValues...,
        )
        ch <- prometheus.MustNewConstMetric(
            c.filesFreeDesc, prometheus.GaugeValue,
            s.filesFree, s.labelValues...,
        )
    }
    return nil
}
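// The stats slice above comes from platform-specific GetStats implementations.
// A hypothetical sketch of the value struct and one descriptor, assuming the
// usual node_exporter filesystem label set {device, mountpoint, fstype}; the
// exact field and metric names in the original may differ:
type filesystemStats struct {
    labelValues       []string // device, mountpoint, fstype
    size, free, avail float64
    files, filesFree  float64
}

var exampleSizeDesc = prometheus.NewDesc(
    prometheus.BuildFQName("node", "filesystem", "size"),
    "Filesystem size in bytes.",
    []string{"device", "mountpoint", "fstype"}, nil,
)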
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
    e.up.Set(1)
    e.totalScrapes.Inc()

    if time.Now().Sub(tempUpdated) < staleInterval {
        // Both temperature series share one metric name, so they must also
        // share the same help string; differing help texts for the same name
        // are rejected at gather time.
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(prometheus.BuildFQName(namespace, "sensor", "temperature"),
                "Current temperature.", []string{"metric"}, nil),
            prometheus.GaugeValue, celsius, "celsius",
        )
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(prometheus.BuildFQName(namespace, "sensor", "temperature"),
                "Current temperature.", []string{"metric"}, nil),
            prometheus.GaugeValue, fahrenheit, "fahrenheit",
        )
    }
    if time.Now().Sub(soundUpdated) < staleInterval {
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(prometheus.BuildFQName(namespace, "sensor", "sound"),
                "Sound (noise) level.", nil, nil),
            prometheus.GaugeValue, float64(sound),
        )
    }
    if time.Now().Sub(lightUpdated) < staleInterval {
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(prometheus.BuildFQName(namespace, "sensor", "light"),
                "Luminous flux per unit area.", nil, nil),
            prometheus.GaugeValue, float64(light),
        )
    }
}
// Collect implements prometheus.Collector.
func (s *memorySeriesStorage) Collect(ch chan<- prometheus.Metric) {
    s.persistence.Collect(ch)
    s.mapper.Collect(ch)
    ch <- s.persistErrors
    ch <- prometheus.MustNewConstMetric(
        maxChunksToPersistDesc,
        prometheus.GaugeValue,
        float64(s.maxChunksToPersist),
    )
    ch <- prometheus.MustNewConstMetric(
        numChunksToPersistDesc,
        prometheus.GaugeValue,
        float64(s.getNumChunksToPersist()),
    )
    ch <- s.numSeries
    s.seriesOps.Collect(ch)
    ch <- s.ingestedSamplesCount
    ch <- s.invalidPreloadRequestsCount
    ch <- prometheus.MustNewConstMetric(
        numMemChunksDesc,
        prometheus.GaugeValue,
        float64(atomic.LoadInt64(&numMemChunks)),
    )
    s.maintainSeriesDuration.Collect(ch)
}
func scrapeInformationSchema(db *sql.DB, ch chan<- prometheus.Metric) error {
    autoIncrementRows, err := db.Query(infoSchemaAutoIncrementQuery)
    if err != nil {
        return err
    }
    defer autoIncrementRows.Close()

    var (
        schema, table, column string
        value, max            uint64
    )

    for autoIncrementRows.Next() {
        if err := autoIncrementRows.Scan(
            &schema, &table, &column, &value, &max,
        ); err != nil {
            return err
        }
        ch <- prometheus.MustNewConstMetric(
            globalInfoSchemaAutoIncrementDesc, prometheus.GaugeValue, float64(value),
            schema, table, column,
        )
        ch <- prometheus.MustNewConstMetric(
            globalInfoSchemaAutoIncrementMaxDesc, prometheus.GaugeValue, float64(max),
            schema, table, column,
        )
    }
    return nil
}
func scrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
    masterLogRows, err := db.Query(binlogQuery)
    if err != nil {
        return err
    }
    defer masterLogRows.Close()

    var (
        size     uint64
        count    uint64
        filename string
        filesize uint64
    )

    for masterLogRows.Next() {
        if err := masterLogRows.Scan(&filename, &filesize); err != nil {
            // Propagate the scan error instead of silently dropping it.
            return err
        }
        size += filesize
        count++
    }

    ch <- prometheus.MustNewConstMetric(
        binlogSizeDesc, prometheus.GaugeValue, float64(size),
    )
    ch <- prometheus.MustNewConstMetric(
        binlogFilesDesc, prometheus.GaugeValue, float64(count),
    )
    return nil
}
func GenericCollect(be *BaseExporter, ch chan<- prometheus.Metric) {
    for _, name := range be.Store.MetricNames() {
        m, err := be.Store.Get(name)
        if err != nil {
            log.Printf("[ERROR] exporter %v unable to get metric %v during GenericCollect due to error: %v",
                be.Name, name, err)
            continue
        }

        var metric prometheus.Metric
        if m.PromLabels() != nil {
            metric = prometheus.MustNewConstMetric(
                m.PromDescription(be.Name),
                m.PromType(),
                m.PromValue(),
                m.PromLabels()...,
            )
        } else {
            metric = prometheus.MustNewConstMetric(
                m.PromDescription(be.Name),
                m.PromType(),
                m.PromValue(),
            )
        }
        ch <- metric
    }
}
func (e *exporter) scrape(ch chan<- prometheus.Metric) {
    defer close(ch)

    now := time.Now().UnixNano()
    defer func() {
        e.duration.Set(float64(time.Now().UnixNano()-now) / 1000000000)
    }()

    recordErr := func(err error) {
        glog.Warning(err)
        e.errors.Inc()
    }

    url, err := e.f.leaderURL()
    if err != nil {
        recordErr(err)
        return
    }

    varsURL := fmt.Sprintf("%s/vars.json", url)
    resp, err := httpClient.Get(varsURL)
    if err != nil {
        recordErr(err)
        return
    }
    defer resp.Body.Close()

    var vars map[string]interface{}
    if err = json.NewDecoder(resp.Body).Decode(&vars); err != nil {
        recordErr(err)
        return
    }

    for name, v := range vars {
        v, ok := v.(float64)
        if !ok {
            continue
        }
        if desc, ok := counters[name]; ok {
            ch <- prometheus.MustNewConstMetric(
                desc, prometheus.CounterValue, v, noLables...,
            )
        }
        if desc, ok := gauges[name]; ok {
            ch <- prometheus.MustNewConstMetric(
                desc, prometheus.GaugeValue, v, noLables...,
            )
        }
        labelVars(ch, name, v)
    }
}
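// Because scrape closes its channel when it is done, the corresponding
// Collect method typically runs it in a goroutine and forwards everything it
// emits. A minimal sketch of that consumer side; this is an assumption about
// the surrounding exporter, not code from the original file:
func (e *exporter) Collect(ch chan<- prometheus.Metric) {
    metricsChan := make(chan prometheus.Metric)
    go e.scrape(metricsChan)
    for metric := range metricsChan {
        ch <- metric
    }
}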
// ScrapeClientStat collects from `information_schema.client_statistics`.
func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
    var varName, varVal string
    err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
    if err != nil {
        log.Debugln("Detailed client stats are not available.")
        return nil
    }
    if varVal == "OFF" {
        log.Debugf("MySQL @@%s is OFF.", varName)
        return nil
    }

    informationSchemaClientStatisticsRows, err := db.Query(clientStatQuery)
    if err != nil {
        return err
    }
    defer informationSchemaClientStatisticsRows.Close()

    // The client column is assumed to be column[0], while all other data is
    // assumed to be coercible to float64. Because of the client column,
    // clientStatData[0] maps to columnNames[1] when reading off the metrics
    // (clientStatScanArgs is mapped as
    // [ &client, &clientStatData[0], &clientStatData[1], ..., &clientStatData[n] ]),
    // so to map metrics to names we always range over columnNames[1:].
    columnNames, err := informationSchemaClientStatisticsRows.Columns()
    if err != nil {
        return err
    }

    var (
        client             string                                // Holds the client name, which should be in column 0.
        clientStatData     = make([]float64, len(columnNames)-1) // 1 less because of the client column.
        clientStatScanArgs = make([]interface{}, len(columnNames))
    )
    clientStatScanArgs[0] = &client
    for i := range clientStatData {
        clientStatScanArgs[i+1] = &clientStatData[i]
    }

    for informationSchemaClientStatisticsRows.Next() {
        if err := informationSchemaClientStatisticsRows.Scan(clientStatScanArgs...); err != nil {
            return err
        }
        // Loop over column names, and match to scan data. Unknown columns are
        // reported as untyped metrics. We assume that, other than the client
        // column, we will only get numbers.
        for idx, columnName := range columnNames[1:] {
            if metricType, ok := informationSchemaClientStatisticsTypes[columnName]; ok {
                ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client)
            } else {
                // Unknown metric. Report as untyped.
                desc := prometheus.NewDesc(
                    prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("client_statistics_%s", strings.ToLower(columnName))),
                    fmt.Sprintf("Unsupported metric from column %s", columnName),
                    []string{"client"}, nil,
                )
                ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client)
            }
        }
    }
    return nil
}
// Collect implements prometheus.Collector.
func (server *Server) Collect(ch chan<- prometheus.Metric) {
    status := server.Status()
    for id, res := range status.Resources {
        ch <- prometheus.MustNewConstMetric(server.descs.has, prometheus.GaugeValue, res.SumHas, id)
        ch <- prometheus.MustNewConstMetric(server.descs.wants, prometheus.GaugeValue, res.SumWants, id)
        ch <- prometheus.MustNewConstMetric(server.descs.subclients, prometheus.GaugeValue, float64(res.Count), id)
    }
}
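// The server.descs bundle used above is defined elsewhere. A plausible sketch
// of its shape, assuming a single variable label carrying the resource ID;
// the struct, metric names, and help strings below are invented for
// illustration:
type serverDescs struct {
    has, wants, subclients *prometheus.Desc
}

var exampleDescs = serverDescs{
    has: prometheus.NewDesc(
        "resource_sum_has", "Sum of the shares the clients currently have.",
        []string{"resource_id"}, nil,
    ),
    wants: prometheus.NewDesc(
        "resource_sum_wants", "Sum of the shares the clients currently want.",
        []string{"resource_id"}, nil,
    ),
    subclients: prometheus.NewDesc(
        "resource_subclients", "Number of subclients per resource.",
        []string{"resource_id"}, nil,
    ),
}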
// Collect implements prometheus.Collector.
func (c *taskCollector) Collect(ch chan<- prometheus.Metric) {
    threadModel := c.stats.TaskManager.ThreadModel
    ch <- prometheus.MustNewConstMetric(
        tasksRunning, prometheus.GaugeValue, float64(threadModel.TasksRunning),
    )
    ch <- prometheus.MustNewConstMetric(
        workerThreads, prometheus.GaugeValue, float64(threadModel.WorkerThreads),
    )
}
func (c *drbdCollector) Update(ch chan<- prometheus.Metric) (err error) {
    statsFile := procFilePath("drbd")
    file, err := os.Open(statsFile)
    if err != nil {
        if os.IsNotExist(err) {
            log.Debugf("Not collecting DRBD statistics, as %s does not exist: %s", statsFile, err)
            return nil
        }
        return err
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)
    scanner.Split(bufio.ScanWords)
    device := "unknown"

    for scanner.Scan() {
        field := scanner.Text()
        if kv := strings.Split(field, ":"); len(kv) == 2 {
            if id, err := strconv.ParseUint(kv[0], 10, 64); err == nil && kv[1] == "" {
                device = fmt.Sprintf("drbd%d", id)
            } else if metric, ok := drbdNumericalMetrics[kv[0]]; ok {
                // Numerical value.
                value, err := strconv.ParseFloat(kv[1], 64)
                if err != nil {
                    return err
                }
                ch <- prometheus.MustNewConstMetric(
                    metric.desc, metric.valueType,
                    value*metric.multiplier, device)
            } else if metric, ok := drbdStringPairMetrics[kv[0]]; ok {
                // String pair value.
                values := strings.Split(kv[1], "/")
                ch <- prometheus.MustNewConstMetric(
                    metric.desc, prometheus.GaugeValue,
                    metric.isOkay(values[0]), device, "local")
                ch <- prometheus.MustNewConstMetric(
                    metric.desc, prometheus.GaugeValue,
                    metric.isOkay(values[1]), device, "remote")
            } else if kv[0] == "cs" {
                // Connection state.
                var connected float64
                if kv[1] == "Connected" {
                    connected = 1
                }
                ch <- prometheus.MustNewConstMetric(
                    drbdConnected, prometheus.GaugeValue,
                    connected, device)
            } else {
                log.Debugf("Don't know how to process key-value pair [%s: %q]", kv[0], kv[1])
            }
        } else {
            log.Debugf("Don't know how to process string %q", field)
        }
    }
    return scanner.Err()
}
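// drbdNumericalMetrics and drbdStringPairMetrics map /proc/drbd keys to
// descriptors. A reduced, hypothetical sketch of the two entry types and one
// entry each, consistent with how they are used above; the field names,
// metric names, and multiplier are illustrative assumptions:
type drbdNumericalMetric struct {
    desc       *prometheus.Desc
    valueType  prometheus.ValueType
    multiplier float64
}

type drbdStringPairMetric struct {
    desc   *prometheus.Desc
    isOkay func(string) float64
}

var exampleDrbdNumericalMetrics = map[string]drbdNumericalMetric{
    "ns": {
        desc: prometheus.NewDesc("node_drbd_network_sent_bytes_total",
            "Total number of bytes sent via the network.", []string{"device"}, nil),
        valueType:  prometheus.CounterValue,
        multiplier: 1024, // /proc/drbd reports these counters in KiB
    },
}

var exampleDrbdStringPairMetrics = map[string]drbdStringPairMetric{
    "ro": {
        desc: prometheus.NewDesc("node_drbd_node_role_is_primary",
            "Whether the role of the node is in the primary state.",
            []string{"device", "node"}, nil),
        isOkay: func(v string) float64 {
            if v == "Primary" {
                return 1
            }
            return 0
        },
    },
}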
func (c *PrometheusCollector) collectMachineInfo(ch chan<- prometheus.Metric) {
    machineInfo, err := c.infoProvider.GetMachineInfo()
    if err != nil {
        c.errors.Set(1)
        glog.Warningf("Couldn't get machine info: %s", err)
        return
    }
    ch <- prometheus.MustNewConstMetric(machineInfoCoresDesc, prometheus.GaugeValue, float64(machineInfo.NumCores))
    ch <- prometheus.MustNewConstMetric(machineInfoMemoryDesc, prometheus.GaugeValue, float64(machineInfo.MemoryCapacity))
}
func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) {
    containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
    if err != nil {
        c.errors.Set(1)
        glog.Warningf("Couldn't get containers: %s", err)
        return
    }
    for _, container := range containers {
        baseLabels := []string{"id"}
        id := container.Name
        name := id
        if len(container.Aliases) > 0 {
            name = container.Aliases[0]
            baseLabels = append(baseLabels, "name")
        }
        image := container.Spec.Image
        if len(image) > 0 {
            baseLabels = append(baseLabels, "image")
        }
        baseLabelValues := []string{id, name, image}[:len(baseLabels)]

        if c.containerNameToLabels != nil {
            newLabels := c.containerNameToLabels(name)
            for k, v := range newLabels {
                baseLabels = append(baseLabels, k)
                baseLabelValues = append(baseLabelValues, v)
            }
        }

        // Container spec
        desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", baseLabels, nil)
        ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...)

        if container.Spec.HasCpu {
            desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", baseLabels, nil)
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), baseLabelValues...)
        }

        if container.Spec.HasMemory {
            desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", baseLabels, nil)
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), baseLabelValues...)
            desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", baseLabels, nil)
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), baseLabelValues...)
        }

        // Now for the actual metrics
        stats := container.Stats[0]
        for _, cm := range c.containerMetrics {
            desc := cm.desc(baseLabels)
            for _, metricValue := range cm.getValues(stats) {
                ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value),
                    append(baseLabelValues, metricValue.labels...)...)
            }
        }
    }
}
// ScrapeTableStat collects from `information_schema.table_statistics`.
func ScrapeTableStat(db *sql.DB, ch chan<- prometheus.Metric) error {
    var varName, varVal string
    err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
    if err != nil {
        log.Debugln("Detailed table stats are not available.")
        return nil
    }
    if varVal == "OFF" {
        log.Debugf("MySQL @@%s is OFF.", varName)
        return nil
    }

    informationSchemaTableStatisticsRows, err := db.Query(tableStatQuery)
    if err != nil {
        return err
    }
    defer informationSchemaTableStatisticsRows.Close()

    var (
        tableSchema         string
        tableName           string
        rowsRead            uint64
        rowsChanged         uint64
        rowsChangedXIndexes uint64
    )

    for informationSchemaTableStatisticsRows.Next() {
        err = informationSchemaTableStatisticsRows.Scan(
            &tableSchema,
            &tableName,
            &rowsRead,
            &rowsChanged,
            &rowsChangedXIndexes,
        )
        if err != nil {
            return err
        }
        ch <- prometheus.MustNewConstMetric(
            infoSchemaTableStatsRowsReadDesc, prometheus.CounterValue, float64(rowsRead),
            tableSchema, tableName,
        )
        ch <- prometheus.MustNewConstMetric(
            infoSchemaTableStatsRowsChangedDesc, prometheus.CounterValue, float64(rowsChanged),
            tableSchema, tableName,
        )
        ch <- prometheus.MustNewConstMetric(
            infoSchemaTableStatsRowsChangedXIndexesDesc, prometheus.CounterValue, float64(rowsChangedXIndexes),
            tableSchema, tableName,
        )
    }
    return nil
}
func (c countersCollector) Collect(ch chan<- prometheus.Metric) {
    for k, n := range c.c.Counts() {
        if c.nLabels > 1 {
            labels := split(k)
            if len(labels) != c.nLabels {
                err := fmt.Errorf("wrong number of labels in MultiCounters key: %d != %d (key=%q)", len(labels), c.nLabels, k)
                ch <- prometheus.NewInvalidMetric(c.desc, err)
                continue
            }
            ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), labels...)
            continue
        }
        ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), k)
    }
}
// ScrapePerfFileEvents collects from `performance_schema.file_summary_by_event_name`.
func ScrapePerfFileEvents(db *sql.DB, ch chan<- prometheus.Metric) error {
    // Timers here are returned in picoseconds.
    perfSchemaFileEventsRows, err := db.Query(perfFileEventsQuery)
    if err != nil {
        return err
    }
    defer perfSchemaFileEventsRows.Close()

    var (
        eventName                         string
        countRead, timeRead, bytesRead    uint64
        countWrite, timeWrite, bytesWrite uint64
        countMisc, timeMisc               uint64
    )

    for perfSchemaFileEventsRows.Next() {
        if err := perfSchemaFileEventsRows.Scan(
            &eventName,
            &countRead, &timeRead, &bytesRead,
            &countWrite, &timeWrite, &bytesWrite,
            &countMisc, &timeMisc,
        ); err != nil {
            return err
        }
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countRead),
            eventName, "read",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeRead)/picoSeconds,
            eventName, "read",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesRead),
            eventName, "read",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countWrite),
            eventName, "write",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeWrite)/picoSeconds,
            eventName, "write",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsBytesDesc, prometheus.CounterValue, float64(bytesWrite),
            eventName, "write",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsDesc, prometheus.CounterValue, float64(countMisc),
            eventName, "misc",
        )
        ch <- prometheus.MustNewConstMetric(
            performanceSchemaFileEventsTimeDesc, prometheus.CounterValue, float64(timeMisc)/picoSeconds,
            eventName, "misc",
        )
    }
    return nil
}
// ScrapeBinlogSize collects from `SHOW BINARY LOGS`.
func ScrapeBinlogSize(db *sql.DB, ch chan<- prometheus.Metric) error {
    var logBin uint8
    err := db.QueryRow(logbinQuery).Scan(&logBin)
    if err != nil {
        return err
    }
    // If log_bin is OFF, do not run SHOW BINARY LOGS, which would produce a MySQL error.
    if logBin == 0 {
        return nil
    }

    masterLogRows, err := db.Query(binlogQuery)
    if err != nil {
        return err
    }
    defer masterLogRows.Close()

    var (
        size     uint64
        count    uint64
        filename string
        filesize uint64
    )

    for masterLogRows.Next() {
        if err := masterLogRows.Scan(&filename, &filesize); err != nil {
            // Propagate the scan error instead of silently dropping it.
            return err
        }
        size += filesize
        count++
    }

    ch <- prometheus.MustNewConstMetric(
        binlogSizeDesc, prometheus.GaugeValue, float64(size),
    )
    ch <- prometheus.MustNewConstMetric(
        binlogFilesDesc, prometheus.GaugeValue, float64(count),
    )
    // The last row contains the last binlog file number.
    value, _ := strconv.ParseFloat(strings.Split(filename, ".")[1], 64)
    ch <- prometheus.MustNewConstMetric(
        binlogFileNumberDesc, prometheus.GaugeValue, value,
    )
    return nil
}
// ScrapeEngineTokudbStatus scrapes from `SHOW ENGINE TOKUDB STATUS`.
func ScrapeEngineTokudbStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
    tokudbRows, err := db.Query(engineTokudbStatusQuery)
    if err != nil {
        return err
    }
    defer tokudbRows.Close()

    var temp, key string
    var val sql.RawBytes

    for tokudbRows.Next() {
        if err := tokudbRows.Scan(&temp, &key, &val); err != nil {
            return err
        }
        key = strings.ToLower(key)
        if floatVal, ok := parseStatus(val); ok {
            ch <- prometheus.MustNewConstMetric(
                newDesc(tokudb, sanitizeTokudbMetric(key), "Generic metric from SHOW ENGINE TOKUDB STATUS."),
                prometheus.UntypedValue,
                floatVal,
            )
        }
    }
    return nil
}
func (c *netDevCollector) Update(ch chan<- prometheus.Metric) (err error) {
    netDev, err := getNetDevStats(c.ignoredDevicesPattern)
    if err != nil {
        return fmt.Errorf("couldn't get netstats: %s", err)
    }
    for dev, devStats := range netDev {
        for key, value := range devStats {
            desc, ok := c.metricDescs[key]
            if !ok {
                desc = prometheus.NewDesc(
                    prometheus.BuildFQName(Namespace, c.subsystem, key),
                    fmt.Sprintf("Network device statistic %s.", key),
                    []string{"device"},
                    nil,
                )
                c.metricDescs[key] = desc
            }
            v, err := strconv.ParseFloat(value, 64)
            if err != nil {
                return fmt.Errorf("invalid value %s in netstats: %s", value, err)
            }
            ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, dev)
        }
    }
    return nil
}
func (c *netStatCollector) Update(ch chan<- prometheus.Metric) (err error) {
    netStats, err := getNetStats(procFilePath("net/netstat"))
    if err != nil {
        return fmt.Errorf("couldn't get netstats: %s", err)
    }
    snmpStats, err := getNetStats(procFilePath("net/snmp"))
    if err != nil {
        return fmt.Errorf("couldn't get SNMP stats: %s", err)
    }
    // Merge the results of snmpStats into netStats (collisions are possible, but
    // we know that the keys are always unique for the given use case).
    for k, v := range snmpStats {
        netStats[k] = v
    }
    for protocol, protocolStats := range netStats {
        for name, value := range protocolStats {
            key := protocol + "_" + name
            v, err := strconv.ParseFloat(value, 64)
            if err != nil {
                return fmt.Errorf("invalid value %s in netstats: %s", value, err)
            }
            ch <- prometheus.MustNewConstMetric(
                prometheus.NewDesc(
                    prometheus.BuildFQName(Namespace, netStatsSubsystem, key),
                    fmt.Sprintf("Protocol %s statistic %s.", protocol, name),
                    nil, nil,
                ),
                prometheus.UntypedValue, v,
            )
        }
    }
    return nil
}
func (c *systemdCollector) collectSystemState(ch chan<- prometheus.Metric, systemState string) {
    isSystemRunning := 0.0
    if systemState == `"running"` {
        isSystemRunning = 1.0
    }
    ch <- prometheus.MustNewConstMetric(c.systemRunningDesc, prometheus.GaugeValue, isSystemRunning)
}
func (c *vmStatCollector) Update(ch chan<- prometheus.Metric) (err error) {
    file, err := os.Open(procFilePath("vmstat"))
    if err != nil {
        return err
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)
    for scanner.Scan() {
        parts := strings.Fields(scanner.Text())
        value, err := strconv.ParseFloat(parts[1], 64)
        if err != nil {
            return err
        }
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(
                prometheus.BuildFQName(Namespace, vmStatSubsystem, parts[0]),
                fmt.Sprintf("/proc/vmstat information field %s.", parts[0]),
                nil, nil),
            prometheus.UntypedValue,
            value,
        )
    }
    // Surface any error the scanner hit while reading the file; the named
    // return value is never assigned after the loop, so returning it here
    // would silently return nil.
    return scanner.Err()
}
func scrapeGlobalVariables(db *sql.DB, ch chan<- prometheus.Metric) error {
    globalVariablesRows, err := db.Query(globalVariablesQuery)
    if err != nil {
        return err
    }
    defer globalVariablesRows.Close()

    var key string
    var val sql.RawBytes

    for globalVariablesRows.Next() {
        if err := globalVariablesRows.Scan(&key, &val); err != nil {
            return err
        }
        key = strings.ToLower(key)
        if floatVal, ok := parseStatus(val); ok {
            ch <- prometheus.MustNewConstMetric(
                newDesc(globalVariables, key, "Generic gauge metric from SHOW GLOBAL VARIABLES."),
                prometheus.GaugeValue,
                floatVal,
            )
            continue
        }
    }
    return nil
}
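// parseStatus, used above and in the TokuDB scraper, turns a raw MySQL value
// into a float64 where possible. A simplified sketch under the assumption
// that only numeric and ON/OFF/Yes/No values need to be handled; the helper
// in the actual exporter covers more formats, so this is illustrative only:
func parseStatusSketch(data sql.RawBytes) (float64, bool) {
    switch strings.ToLower(string(data)) {
    case "yes", "on":
        return 1, true
    case "no", "off":
        return 0, true
    }
    value, err := strconv.ParseFloat(string(data), 64)
    return value, err == nil
}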
// Collect implements prometheus.Collector.
func (c *scollectorCollector) Collect(ch chan<- prometheus.Metric) {
    Log.Debug("Collect", "samples", len(c.samples))
    ch <- lastProcessed
    Log.Debug("Collect", "lastProcessed", lastProcessed)

    c.mu.Lock()
    samples := make([]scollectorSample, 0, len(c.samples))
    for _, sample := range c.samples {
        samples = append(samples, sample)
    }
    c.mu.Unlock()

    ageLimit := time.Now().Add(-*sampleExpiry)
    for _, sample := range samples {
        if ageLimit.After(sample.Timestamp) {
            Log.Debug("skipping old sample", "limit", ageLimit, "sample", sample)
            continue
        }
        Log.Debug("sending sample", "sample", sample)
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc(sample.Name, sample.Help, []string{}, sample.Labels),
            sample.Type,
            sample.Value,
        )
    }
}
// Query the SHOW variables from the query map
// TODO: make this more functional
func queryShowVariables(ch chan<- prometheus.Metric, db *sql.DB, variableMap map[string]MetricMapNamespace) []error {
    log.Debugln("Querying SHOW variables")
    nonFatalErrors := []error{}

    for _, mapping := range variableMap {
        for columnName, columnMapping := range mapping.columnMappings {
            // Check for a discard request on this value
            if columnMapping.discard {
                continue
            }

            // Use SHOW to get the value
            row := db.QueryRow(fmt.Sprintf("SHOW %s;", columnName))

            var val interface{}
            err := row.Scan(&val)
            if err != nil {
                nonFatalErrors = append(nonFatalErrors, errors.New(fmt.Sprintln("Error scanning runtime variable:", columnName, err)))
                continue
            }

            fval, ok := columnMapping.conversion(val)
            if !ok {
                nonFatalErrors = append(nonFatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, val)))
                continue
            }

            ch <- prometheus.MustNewConstMetric(columnMapping.desc, columnMapping.vtype, fval)
        }
    }

    return nonFatalErrors
}
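// columnMapping.conversion above turns the interface{} scanned from SHOW into
// a float64. A hypothetical sketch of such a conversion, assuming the common
// database/sql driver types; the exporter's real converter likely handles
// additional cases, so treat this as illustrative only:
func dbToFloat64Sketch(t interface{}) (float64, bool) {
    switch v := t.(type) {
    case int64:
        return float64(v), true
    case float64:
        return v, true
    case []byte:
        f, err := strconv.ParseFloat(string(v), 64)
        return f, err == nil
    case string:
        f, err := strconv.ParseFloat(v, 64)
        return f, err == nil
    case nil:
        return math.NaN(), true
    default:
        return math.NaN(), false
    }
}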
// Collect fetches the stats from all containers and delivers them as
// Prometheus metrics. It implements prometheus.Collector.
func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
    containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
    if err != nil {
        c.errors.Set(1)
        glog.Warningf("Couldn't get containers: %s", err)
        return
    }
    for _, container := range containers {
        id := container.Name
        name := id
        if len(container.Aliases) > 0 {
            name = container.Aliases[0]
        }
        image := container.Spec.Image
        stats := container.Stats[0]
        for _, cm := range c.containerMetrics {
            desc := cm.desc()
            for _, metricValue := range cm.getValues(stats) {
                ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value),
                    append([]string{name, id, image}, metricValue.labels...)...)
            }
        }
    }
    c.errors.Collect(ch)
}
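// Pulling the pattern shared by all of the examples above together: a
// self-contained, minimal custom collector that builds const metrics in
// Collect and advertises its descriptor in Describe. This is a generic
// sketch, not part of any of the exporters quoted above; the metric name,
// queue names, and values are invented for illustration.
package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

type queueCollector struct {
    lengthDesc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
    return &queueCollector{
        lengthDesc: prometheus.NewDesc(
            "example_queue_length",
            "Current number of items in the queue.",
            []string{"queue"}, nil,
        ),
    }
}

// Describe sends the static descriptor to the provided channel.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
    ch <- c.lengthDesc
}

// Collect is called on every scrape and emits freshly built const metrics.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
    // In a real collector these values would come from the system being
    // instrumented; here they are hard-coded placeholders.
    for queue, length := range map[string]float64{"inbound": 42, "outbound": 7} {
        ch <- prometheus.MustNewConstMetric(
            c.lengthDesc, prometheus.GaugeValue, length, queue,
        )
    }
}

func main() {
    prometheus.MustRegister(newQueueCollector())
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(":9101", nil))
}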