func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
	e.up.Set(1)
	e.totalScrapes.Inc()

	if time.Since(tempUpdated) < staleInterval {
		// Both series share the same metric name, so they must also share the
		// same help string; otherwise the registry rejects them as inconsistent.
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, "sensor", "temperature"),
				"Current temperature.", []string{"metric"}, nil,
			),
			prometheus.GaugeValue, celsius, "celsius",
		)
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, "sensor", "temperature"),
				"Current temperature.", []string{"metric"}, nil,
			),
			prometheus.GaugeValue, fahrenheit, "fahrenheit",
		)
	}

	if time.Since(soundUpdated) < staleInterval {
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, "sensor", "sound"),
				"Sound (noise) level.", nil, nil,
			),
			prometheus.GaugeValue, float64(sound),
		)
	}

	if time.Since(lightUpdated) < staleInterval {
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, "sensor", "light"),
				"Luminous flux per unit area.", nil, nil,
			),
			prometheus.GaugeValue, float64(light),
		)
	}
}
// NewDevstatCollector returns a new Collector exposing device I/O statistics.
func NewDevstatCollector() (Collector, error) {
	return &devstatCollector{
		devinfo: &C.struct_devinfo{},
		bytes: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, devstatSubsystem, "bytes_total"),
			"The total number of bytes in transactions.",
			[]string{"device", "type"}, nil,
		), prometheus.CounterValue},
		transfers: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, devstatSubsystem, "transfers_total"),
			"The total number of transactions.",
			[]string{"device", "type"}, nil,
		), prometheus.CounterValue},
		duration: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, devstatSubsystem, "duration_seconds_total"),
			"The total duration of transactions in seconds.",
			[]string{"device", "type"}, nil,
		), prometheus.CounterValue},
		busyTime: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, devstatSubsystem, "busy_time_seconds_total"),
			"Total time the device had one or more transactions outstanding in seconds.",
			[]string{"device"}, nil,
		), prometheus.CounterValue},
		blocks: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, devstatSubsystem, "blocks_transferred_total"),
			"The total number of blocks transferred.",
			[]string{"device"}, nil,
		), prometheus.CounterValue},
	}, nil
}
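// Several of the constructors above and below pair a *prometheus.Desc with the
// value type it is reported as via a typedDesc helper, whose definition does not
// appear in this section. A minimal sketch of such a helper, assuming the
// node_exporter convention (the mustNewConstMetric method name is an assumption):
type typedDesc struct {
	desc      *prometheus.Desc
	valueType prometheus.ValueType
}

// mustNewConstMetric wraps prometheus.MustNewConstMetric with the stored
// description and value type.
func (d *typedDesc) mustNewConstMetric(value float64, labels ...string) prometheus.Metric {
	return prometheus.MustNewConstMetric(d.desc, d.valueType, value, labels...)
}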
// NewPfCollector returns a new Collector exposing pf (packet filter) statistics.
func NewPfCollector() (Collector, error) {
	subsystem := "pf"

	return &pfCollector{
		match: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "match"),
			"PF internal match counter.", nil, nil,
		),
		states: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "state_count"),
			"State table entry count.", nil, nil,
		),
		searches: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "state_search"),
			"State table search counter.", nil, nil,
		),
		inserts: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "state_insert"),
			"State table insert counter.", nil, nil,
		),
		removals: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "state_removal"),
			"State table remove counter.", nil, nil,
		),
	}, nil
}
// NewRunitCollector returns a new Collector exposing the state of runit services.
func NewRunitCollector() (Collector, error) {
	var (
		subsystem   = "service"
		constLabels = prometheus.Labels{"supervisor": "runit"}
		labelNames  = []string{"service"}
	)

	return &runitCollector{
		state: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "state"),
			"State of runit service.", labelNames, constLabels,
		), prometheus.GaugeValue},
		stateDesired: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "desired_state"),
			"Desired state of runit service.", labelNames, constLabels,
		), prometheus.GaugeValue},
		stateNormal: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "normal_state"),
			"Normal state of runit service.", labelNames, constLabels,
		), prometheus.GaugeValue},
		stateTimestamp: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "state_last_change_timestamp_seconds"),
			"Unix timestamp of the last runit service state change.", labelNames, constLabels,
		), prometheus.GaugeValue},
	}, nil
}
// NewLoadavgCollector returns a new Collector exposing load averages.
func NewLoadavgCollector() (Collector, error) {
	return &loadavgCollector{
		metric: []typedDesc{
			{prometheus.NewDesc(Namespace+"_load1", "1m load average.", nil, nil), prometheus.GaugeValue},
			{prometheus.NewDesc(Namespace+"_load5", "5m load average.", nil, nil), prometheus.GaugeValue},
			{prometheus.NewDesc(Namespace+"_load15", "15m load average.", nil, nil), prometheus.GaugeValue},
		},
	}, nil
}
// NewExporter returns an initialized Exporter scraping the given status URI.
func NewExporter(uri string) *Exporter {
	return &Exporter{
		URI: uri,
		up: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "", "up"),
			"Could the Apache server be reached.",
			nil, nil),
		scrapeFailures: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "exporter_scrape_failures_total",
			Help:      "Number of errors while scraping Apache.",
		}),
		accessesTotal: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "", "accesses_total"),
			"Current total Apache accesses.",
			nil, nil),
		kBytesTotal: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "", "sent_kilobytes_total"),
			"Current total kilobytes sent.",
			nil, nil),
		uptime: prometheus.NewDesc(
			prometheus.BuildFQName(namespace, "", "uptime_seconds_total"),
			"Current uptime in seconds.",
			nil, nil),
		workers: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "workers",
			Help:      "Apache worker statuses.",
		},
			[]string{"state"},
		),
		scoreboard: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "scoreboard",
			Help:      "Apache scoreboard statuses.",
		},
			[]string{"state"},
		),
		connections: prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "connections",
			Help:      "Apache connection statuses.",
		},
			[]string{"state"},
		),
		client: &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: *insecure},
			},
		},
	}
}
func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric) {
	containers, err := c.infoProvider.SubcontainersInfo("/", &info.ContainerInfoRequest{NumStats: 1})
	if err != nil {
		c.errors.Set(1)
		glog.Warningf("Couldn't get containers: %s", err)
		return
	}
	for _, container := range containers {
		baseLabels := []string{"id"}
		id := container.Name
		name := id
		if len(container.Aliases) > 0 {
			name = container.Aliases[0]
			baseLabels = append(baseLabels, "name")
		}
		image := container.Spec.Image
		if len(image) > 0 {
			baseLabels = append(baseLabels, "image")
		}
		baseLabelValues := []string{id, name, image}[:len(baseLabels)]

		if c.containerNameToLabels != nil {
			newLabels := c.containerNameToLabels(name)
			for k, v := range newLabels {
				baseLabels = append(baseLabels, k)
				baseLabelValues = append(baseLabelValues, v)
			}
		}

		// Container spec
		desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", baseLabels, nil)
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...)

		if container.Spec.HasCpu {
			desc := prometheus.NewDesc("container_spec_cpu_shares", "CPU share of the container.", baseLabels, nil)
			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.Cpu.Limit), baseLabelValues...)
		}
		if container.Spec.HasMemory {
			desc := prometheus.NewDesc("container_spec_memory_limit_bytes", "Memory limit for the container.", baseLabels, nil)
			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.Limit), baseLabelValues...)
			desc = prometheus.NewDesc("container_spec_memory_swap_limit_bytes", "Memory swap limit for the container.", baseLabels, nil)
			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, specMemoryValue(container.Spec.Memory.SwapLimit), baseLabelValues...)
		}

		// Now for the actual metrics
		stats := container.Stats[0]
		for _, cm := range c.containerMetrics {
			desc := cm.desc(baseLabels)
			for _, metricValue := range cm.getValues(stats) {
				ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append(baseLabelValues, metricValue.labels...)...)
			}
		}
	}
}
// NewStatCollector returns a new Collector exposing CPU stats.
func NewStatCollector() (Collector, error) {
	return &statCollector{
		cpu: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "cpu", "seconds_total"),
			"Seconds the CPU spent in each mode.",
			[]string{"cpu", "mode"}, nil,
		), prometheus.CounterValue},
		temp: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "cpu", "temperature_celsius"),
			"CPU temperature.",
			[]string{"cpu"}, nil,
		), prometheus.GaugeValue},
	}, nil
}
// NewConntrackCollector returns a new Collector exposing conntrack stats.
func NewConntrackCollector() (Collector, error) {
	return &conntrackCollector{
		current: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "nf_conntrack_entries"),
			"Number of currently allocated flow entries for connection tracking.",
			nil, nil,
		),
		limit: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "nf_conntrack_entries_limit"),
			"Maximum size of connection tracking table.",
			nil, nil,
		),
	}, nil
}
// NewBondingCollector returns a newly allocated bondingCollector.
// It exposes the number of configured and active slaves of Linux bonding interfaces.
func NewBondingCollector() (Collector, error) {
	return &bondingCollector{
		slaves: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "bonding", "slaves"),
			"Number of configured slaves per bonding interface.",
			[]string{"master"}, nil,
		), prometheus.GaugeValue},
		active: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "bonding", "active"),
			"Number of active slaves per bonding interface.",
			[]string{"master"}, nil,
		), prometheus.GaugeValue},
	}, nil
}
func TestIPVSCollector(t *testing.T) {
	if err := flag.Set("collector.procfs", "fixtures/proc"); err != nil {
		t.Fatal(err)
	}

	collector, err := newIPVSCollector()
	if err != nil {
		t.Fatal(err)
	}

	sink := make(chan prometheus.Metric)
	go func() {
		// t.Fatal must not be called from a non-test goroutine; report via t.Error instead.
		if err := collector.Update(sink); err != nil {
			t.Error(err)
		}
	}()

	for expected, got := range map[string]string{
		prometheus.NewDesc("node_ipvs_connections_total", "The total number of connections made.", nil, nil).String():      (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_incoming_packets_total", "The total number of incoming packets.", nil, nil).String(): (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_outgoing_packets_total", "The total number of outgoing packets.", nil, nil).String(): (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_incoming_bytes_total", "The total amount of incoming data.", nil, nil).String():      (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_outgoing_bytes_total", "The total amount of outgoing data.", nil, nil).String():      (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_backend_connections_active", "The current active connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String():     (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_backend_connections_inactive", "The current inactive connections by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String(): (<-sink).Desc().String(),
		prometheus.NewDesc("node_ipvs_backend_weight", "The current backend weight by local and remote address.", []string{"local_address", "local_port", "remote_address", "remote_port", "proto"}, nil).String():                     (<-sink).Desc().String(),
	} {
		if expected != got {
			t.Fatalf("Expected '%s' but got '%s'", expected, got)
		}
	}
}
// Collect implements prometheus.Collector.
func (c *scollectorCollector) Collect(ch chan<- prometheus.Metric) {
	Log.Debug("Collect", "samples", len(c.samples))
	ch <- lastProcessed
	Log.Debug("Collect", "lastProcessed", lastProcessed)

	c.mu.Lock()
	samples := make([]scollectorSample, 0, len(c.samples))
	for _, sample := range c.samples {
		samples = append(samples, sample)
	}
	c.mu.Unlock()

	ageLimit := time.Now().Add(-*sampleExpiry)
	for _, sample := range samples {
		if ageLimit.After(sample.Timestamp) {
			Log.Debug("skipping old sample", "limit", ageLimit, "sample", sample)
			continue
		}
		Log.Debug("sending sample", "sample", sample)
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(sample.Name, sample.Help, []string{}, sample.Labels),
			sample.Type, sample.Value,
		)
	}
}
// PromDescription builds the Prometheus descriptor for this metric, prefixed
// with the exporter name.
func (m *Metric) PromDescription(exporterName string) *prometheus.Desc {
	return prometheus.NewDesc(
		prometheus.BuildFQName("", exporterName, m.Name),
		m.Description,
		m.LabelKeys,
		nil,
	)
}
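// prometheus.BuildFQName joins its non-empty parts with underscores, so passing
// an empty namespace (as PromDescription above does) simply prefixes the metric
// name with the exporter name. A small, self-contained illustration:
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Empty components are skipped; the remaining ones are joined with "_".
	fmt.Println(prometheus.BuildFQName("", "myexporter", "requests_total")) // myexporter_requests_total
	fmt.Println(prometheus.BuildFQName("node", "cpu", "seconds_total"))     // node_cpu_seconds_total
}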
func (c *netDevCollector) Update(ch chan<- prometheus.Metric) (err error) {
	netDev, err := getNetDevStats(c.ignoredDevicesPattern)
	if err != nil {
		return fmt.Errorf("couldn't get netstats: %s", err)
	}
	for dev, devStats := range netDev {
		for key, value := range devStats {
			desc, ok := c.metricDescs[key]
			if !ok {
				desc = prometheus.NewDesc(
					prometheus.BuildFQName(Namespace, c.subsystem, key),
					fmt.Sprintf("Network device statistic %s.", key),
					[]string{"device"},
					nil,
				)
				c.metricDescs[key] = desc
			}
			v, err := strconv.ParseFloat(value, 64)
			if err != nil {
				return fmt.Errorf("invalid value %s in netstats: %s", value, err)
			}
			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, v, dev)
		}
	}
	return nil
}
func (c *vmStatCollector) Update(ch chan<- prometheus.Metric) (err error) {
	file, err := os.Open(procFilePath("vmstat"))
	if err != nil {
		return err
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		value, err := strconv.ParseFloat(parts[1], 64)
		if err != nil {
			return err
		}

		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(Namespace, vmStatSubsystem, parts[0]),
				fmt.Sprintf("/proc/vmstat information field %s.", parts[0]),
				nil, nil),
			prometheus.UntypedValue,
			value,
		)
	}
	// Surface scanner errors instead of silently dropping them.
	return scanner.Err()
}
func (c *netStatCollector) Update(ch chan<- prometheus.Metric) (err error) {
	netStats, err := getNetStats(procFilePath("net/netstat"))
	if err != nil {
		return fmt.Errorf("couldn't get netstats: %s", err)
	}
	snmpStats, err := getNetStats(procFilePath("net/snmp"))
	if err != nil {
		return fmt.Errorf("couldn't get SNMP stats: %s", err)
	}
	// Merge the results of snmpStats into netStats (collisions are possible, but
	// we know that the keys are always unique for the given use case).
	for k, v := range snmpStats {
		netStats[k] = v
	}
	for protocol, protocolStats := range netStats {
		for name, value := range protocolStats {
			key := protocol + "_" + name
			v, err := strconv.ParseFloat(value, 64)
			if err != nil {
				return fmt.Errorf("invalid value %s in netstats: %s", value, err)
			}
			ch <- prometheus.MustNewConstMetric(
				prometheus.NewDesc(
					prometheus.BuildFQName(Namespace, netStatsSubsystem, key),
					fmt.Sprintf("Protocol %s statistic %s.", protocol, name),
					nil, nil,
				),
				prometheus.UntypedValue, v,
			)
		}
	}
	return nil
}
// ScrapeClientStat collects from `information_schema.client_statistics`.
func ScrapeClientStat(db *sql.DB, ch chan<- prometheus.Metric) error {
	var varName, varVal string
	err := db.QueryRow(userstatCheckQuery).Scan(&varName, &varVal)
	if err != nil {
		log.Debugln("Detailed client stats are not available.")
		return nil
	}
	if varVal == "OFF" {
		log.Debugf("MySQL @@%s is OFF.", varName)
		return nil
	}

	informationSchemaClientStatisticsRows, err := db.Query(clientStatQuery)
	if err != nil {
		return err
	}
	defer informationSchemaClientStatisticsRows.Close()

	// The client column is assumed to be column[0], while all other data is assumed to be coercible to float64.
	// Because of the client column, clientStatData[0] maps to columnNames[1] when reading off the metrics
	// (because clientStatScanArgs is mapped as [ &client, &clientStatData[0], &clientStatData[1] ... &clientStatData[n] ]).
	// To map metrics to names we therefore always range over columnNames[1:].
	columnNames, err := informationSchemaClientStatisticsRows.Columns()
	if err != nil {
		return err
	}

	var (
		client             string                                // Holds the client name, which should be in column 0.
		clientStatData     = make([]float64, len(columnNames)-1) // 1 less because of the client column.
		clientStatScanArgs = make([]interface{}, len(columnNames))
	)
	clientStatScanArgs[0] = &client
	for i := range clientStatData {
		clientStatScanArgs[i+1] = &clientStatData[i]
	}
	for informationSchemaClientStatisticsRows.Next() {
		if err := informationSchemaClientStatisticsRows.Scan(clientStatScanArgs...); err != nil {
			return err
		}
		// Loop over column names, and match to scan data. Unknown columns
		// will be reported as untyped metrics. We assume that, other than
		// client, we'll only get numbers.
		for idx, columnName := range columnNames[1:] {
			if metricType, ok := informationSchemaClientStatisticsTypes[columnName]; ok {
				ch <- prometheus.MustNewConstMetric(metricType.desc, metricType.vtype, float64(clientStatData[idx]), client)
			} else {
				// Unknown metric. Report as untyped.
				desc := prometheus.NewDesc(prometheus.BuildFQName(namespace, informationSchema, fmt.Sprintf("client_statistics_%s", strings.ToLower(columnName))), fmt.Sprintf("Unsupported metric from column %s", columnName), []string{"client"}, nil)
				ch <- prometheus.MustNewConstMetric(desc, prometheus.UntypedValue, float64(clientStatData[idx]), client)
			}
		}
	}
	return nil
}
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
func makeDescMap(metricMaps map[NamespaceAndQuery]map[string]ColumnMapping) map[NamespaceAndQuery]MetricMapNamespace {
	var metricMap = make(map[NamespaceAndQuery]MetricMapNamespace)

	for namespaceAndQuery, mappings := range metricMaps {
		namespace := namespaceAndQuery.namespace
		thisMap := make(map[string]MetricMap)

		// Get the constant labels
		var constLabels []string
		for columnName, columnMapping := range mappings {
			if columnMapping.usage == LABEL {
				constLabels = append(constLabels, columnName)
			}
		}

		for columnName, columnMapping := range mappings {
			switch columnMapping.usage {
			case DISCARD, LABEL:
				thisMap[columnName] = MetricMap{
					discard: true,
				}
			case COUNTER:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.CounterValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, constLabels, nil),
				}
			case GAUGE:
				thisMap[columnName] = MetricMap{
					vtype: prometheus.GaugeValue,
					desc:  prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, constLabels, nil),
				}
			case MAPPEDMETRIC:
				thisMap[columnName] = MetricMap{
					vtype:   prometheus.GaugeValue,
					desc:    prometheus.NewDesc(fmt.Sprintf("%s_%s", namespace, columnName), columnMapping.description, constLabels, nil),
					mapping: columnMapping.mapping,
				}
			}
		}

		metricMap[namespaceAndQuery] = MetricMapNamespace{constLabels, thisMap}
	}

	return metricMap
}
// Collect implements prometheus.Collector.
func (c collector) Collect(ch chan<- prometheus.Metric) {
	start := time.Now()
	pdus, err := ScrapeTarget(c.target, c.module)
	if err != nil {
		log.Errorf("Error scraping target %s: %s", c.target, err)
		ch <- prometheus.NewInvalidMetric(prometheus.NewDesc("snmp_error", "Error scraping target", nil, nil), err)
		return
	}
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("snmp_scrape_walk_duration_seconds", "Time SNMP walk/bulkwalk took.", nil, nil),
		prometheus.GaugeValue,
		float64(time.Since(start).Seconds()))
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("snmp_scrape_pdus_returned", "PDUs returned from walk.", nil, nil),
		prometheus.GaugeValue,
		float64(len(pdus)))

	oidToPdu := make(map[string]gosnmp.SnmpPDU, len(pdus))
	for _, pdu := range pdus {
		oidToPdu[pdu.Name[1:]] = pdu
	}

	metricTree := buildMetricTree(c.module.Metrics)

	// Look for metrics that match each pdu.
PduLoop:
	for oid, pdu := range oidToPdu {
		head := metricTree
		oidList := oidToList(oid)
		for i, o := range oidList {
			var ok bool
			head, ok = head.children[o]
			if !ok {
				continue PduLoop
			}
			if head.metric != nil {
				// Found a match.
				ch <- pduToSample(oidList[i+1:], &pdu, head.metric, oidToPdu)
				break
			}
		}
	}
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc("snmp_scrape_duration_seconds", "Total SNMP time scrape took (walk and processing).", nil, nil),
		prometheus.GaugeValue,
		float64(time.Since(start).Seconds()))
}
// NewEntropyCollector returns a new Collector exposing entropy stats.
func NewEntropyCollector() (Collector, error) {
	return &entropyCollector{
		entropy_avail: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "entropy_available_bits"),
			"Bits of available entropy.",
			nil, nil,
		),
	}, nil
}
// NewSystemdCollector returns a new Collector exposing systemd statistics.
func NewSystemdCollector() (Collector, error) {
	const subsystem = "systemd"

	unitDesc := prometheus.NewDesc(
		prometheus.BuildFQName(Namespace, subsystem, "unit_state"),
		"Systemd unit", []string{"name", "state"}, nil,
	)
	systemRunningDesc := prometheus.NewDesc(
		prometheus.BuildFQName(Namespace, subsystem, "system_running"),
		"Whether the system is operational (see 'systemctl is-system-running')",
		nil, nil,
	)

	return &systemdCollector{
		unitDesc:          unitDesc,
		systemRunningDesc: systemRunningDesc,
	}, nil
}
// New constructs a new Notifier.
func New(o *Options) *Notifier {
	ctx, cancel := context.WithCancel(context.Background())

	return &Notifier{
		queue:  make(model.Alerts, 0, o.QueueCapacity),
		ctx:    ctx,
		cancel: cancel,
		more:   make(chan struct{}, 1),
		opts:   o,

		latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "latency_seconds",
			Help:      "Latency quantiles for sending alert notifications (not including dropped notifications).",
		},
			[]string{alertmanagerLabel},
		),
		errors: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "errors_total",
			Help:      "Total number of errors sending alert notifications.",
		},
			[]string{alertmanagerLabel},
		),
		sent: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "sent_total",
			Help:      "Total number of alerts successfully sent.",
		},
			[]string{alertmanagerLabel},
		),
		dropped: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "dropped_total",
			Help:      "Total number of alerts dropped due to alert manager missing in configuration.",
		}),
		queueLength: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "queue_length",
			Help:      "The number of alert notifications in the queue.",
		}),
		queueCapacity: prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, subsystem, "queue_capacity"),
				"The capacity of the alert notifications queue.",
				nil, nil,
			),
			prometheus.GaugeValue,
			float64(o.QueueCapacity),
		),
	}
}
func ExampleConstHistogram() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds",
		"A histogram of the HTTP request durations.",
		[]string{"code", "method"},
		prometheus.Labels{"owner": "example"},
	)

	// Create a constant histogram from values we got from a 3rd party telemetry system.
	h := prometheus.MustNewConstHistogram(
		desc,
		4711, 403.34,
		map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
		"200", "get",
	)

	// Just for demonstration, let's check the state of the histogram by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	h.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// label: <
	//   name: "code"
	//   value: "200"
	// >
	// label: <
	//   name: "method"
	//   value: "get"
	// >
	// label: <
	//   name: "owner"
	//   value: "example"
	// >
	// histogram: <
	//   sample_count: 4711
	//   sample_sum: 403.34
	//   bucket: <
	//     cumulative_count: 121
	//     upper_bound: 25
	//   >
	//   bucket: <
	//     cumulative_count: 2403
	//     upper_bound: 50
	//   >
	//   bucket: <
	//     cumulative_count: 3221
	//     upper_bound: 100
	//   >
	//   bucket: <
	//     cumulative_count: 4233
	//     upper_bound: 200
	//   >
	// >
}
// NewTCPStatCollector returns a new Collector exposing TCP connection state stats.
func NewTCPStatCollector() (Collector, error) {
	return &tcpStatCollector{
		desc: typedDesc{prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "tcp", "connection_states"),
			"Number of connection states.",
			[]string{"state"}, nil,
		), prometheus.GaugeValue},
	}, nil
}
// NewInterruptsCollector returns a new Collector exposing interrupt stats.
func NewInterruptsCollector() (Collector, error) {
	return &interruptsCollector{
		desc: typedDesc{prometheus.NewDesc(
			Namespace+"_interrupts",
			"Interrupt details.",
			interruptLabelNames, nil,
		), prometheus.CounterValue},
	}, nil
}
// ScrapeSlaveStatus collects from `SHOW SLAVE STATUS`.
func ScrapeSlaveStatus(db *sql.DB, ch chan<- prometheus.Metric) error {
	var (
		slaveStatusRows *sql.Rows
		err             error
	)
	// Leverage lock-free SHOW SLAVE STATUS by guessing the right suffix.
	for _, suffix := range slaveStatusQuerySuffixes {
		slaveStatusRows, err = db.Query(fmt.Sprint(slaveStatusQuery, suffix))
		if err == nil {
			break
		}
	}
	if err != nil {
		return err
	}
	defer slaveStatusRows.Close()

	slaveCols, err := slaveStatusRows.Columns()
	if err != nil {
		return err
	}

	for slaveStatusRows.Next() {
		// As the number of columns varies with mysqld versions,
		// and sql.Scan requires []interface{}, we need to create a
		// slice of pointers to scan into.
		scanArgs := make([]interface{}, len(slaveCols))
		for i := range scanArgs {
			scanArgs[i] = &sql.RawBytes{}
		}

		if err := slaveStatusRows.Scan(scanArgs...); err != nil {
			return err
		}

		masterUUID := columnValue(scanArgs, slaveCols, "Master_UUID")
		masterHost := columnValue(scanArgs, slaveCols, "Master_Host")
		channelName := columnValue(scanArgs, slaveCols, "Channel_Name")

		for i, col := range slaveCols {
			if value, ok := parseStatus(*scanArgs[i].(*sql.RawBytes)); ok { // Silently skip unparsable values.
				ch <- prometheus.MustNewConstMetric(
					prometheus.NewDesc(
						prometheus.BuildFQName(namespace, slaveStatus, strings.ToLower(col)),
						"Generic metric from SHOW SLAVE STATUS.",
						[]string{"master_uuid", "master_host", "channel_name"},
						nil,
					),
					prometheus.UntypedValue,
					value,
					masterUUID, masterHost, channelName,
				)
			}
		}
	}
	return nil
}
func newDRBDStringPairMetric(name string, desc string, valueOkay string) drbdStringPairMetric {
	return drbdStringPairMetric{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "drbd", name),
			desc,
			[]string{"device", "node"}, nil),
		valueOkay: valueOkay,
	}
}
func newDesc(m *metrics.Metric, l *metrics.LabelSet) *prometheus.Desc {
	labels := prometheus.Labels{}
	for k, v := range l.Labels {
		labels[k] = v
	}
	name := m.Name
	help := strings.ToLower(m.Kind.String())
	return prometheus.NewDesc(name, help, []string{}, labels)
}
// NewTimeCollector returns a new Collector exposing the current system time in
// seconds since the Unix epoch.
func NewTimeCollector() (Collector, error) {
	return &timeCollector{
		desc: prometheus.NewDesc(
			Namespace+"_time",
			"System time in seconds since epoch (1970).",
			nil, nil,
		),
	}, nil
}
// NewCPUCollector returns a new Collector exposing CPU stats.
func NewCPUCollector() (Collector, error) {
	return &statCollector{
		cpu: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "", "cpu"),
			"Seconds the CPUs spent in each mode.",
			[]string{"cpu", "mode"}, nil,
		),
	}, nil
}
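// None of the descriptors above become scrapeable until a type implementing
// prometheus.Collector is registered and exposed over HTTP. A minimal,
// self-contained sketch of that wiring; the demoCollector type and its metric
// are invented for illustration (the node_exporter-style constructors above are
// wrapped by their own Update-based collector instead):
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// demoCollector emits a single constant gauge with one label.
type demoCollector struct {
	desc *prometheus.Desc
}

func (c *demoCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *demoCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, 42, "example")
}

func main() {
	registry := prometheus.NewRegistry()
	registry.MustRegister(&demoCollector{
		desc: prometheus.NewDesc(
			prometheus.BuildFQName("demo", "", "value"),
			"A constant demo value.",
			[]string{"source"}, nil,
		),
	})
	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}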