func NewEventHandler(cf Config) (events.Handler, error) {
	h := &handler{listenAddr: cf.ListenAddr, errorSink: cf.ErrorSink}

	h.connections = prom.NewCounterVec(prom.CounterOpts{
		Name: "flux_connections_total",
		Help: "Number of TCP connections established",
	}, []string{"individual", "group", "src", "dst", "protocol"})

	httpLabels := []string{"individual", "group", "src", "dst", "method", "code"}

	h.http = prom.NewCounterVec(prom.CounterOpts{
		Name: "flux_http_total",
		Help: "Number of HTTP request/response exchanges",
	}, httpLabels)

	h.httpRoundtrip = prom.NewSummaryVec(prom.SummaryOpts{
		Name: "flux_http_roundtrip_usec",
		Help: "HTTP response roundtrip time in microseconds",
	}, httpLabels)

	h.httpTotal = prom.NewSummaryVec(prom.SummaryOpts{
		Name: "flux_http_total_usec",
		Help: "HTTP total response time in microseconds",
	}, httpLabels)

	if cf.AdvertiseAddr != "" {
		var err error
		if h.advertiser, err = newAdvertiser(cf); err != nil {
			return nil, err
		}
	}

	return h, nil
}
func New(configFile string) (e exporter, err error) {
	e = exporter{
		configFile: configFile,
		Metrics:    map[string]*prometheus.GaugeVec{},
		scrapeDuration: prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Name:      "scrape_duration_seconds",
				Help:      "gmond_exporter: Duration of a scrape job.",
			},
			[]string{"endpoint", "result"},
		),
		metricsUpdated: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Namespace: namespace,
				Name:      "metrics_updated_count",
				Help:      "gmond_exporter: Number of metrics updated.",
			},
			[]string{"endpoint"},
		),
		metricsExported: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "metrics_exported_count",
			Help:      "gmond_exporter: Number of metrics exported.",
		}),
		configChan:            make(chan config),
		listeningAddress:      ":8080",
		gangliaScrapeInterval: 60 * time.Second,
	}

	conf, err := e.readConfig()
	if err != nil {
		return e, fmt.Errorf("Couldn't read config: %s", err)
	}
	e.conf = conf

	if conf.ListeningAddress != "" {
		e.listeningAddress = conf.ListeningAddress
	}
	if conf.GangliaScrapeInterval != 0 {
		e.gangliaScrapeInterval = time.Duration(conf.GangliaScrapeInterval) * time.Second
	}

	prometheus.MustRegister(e.scrapeDuration)
	prometheus.MustRegister(e.metricsUpdated)
	prometheus.MustRegister(e.metricsExported)
	debug("Registered internal metrics")

	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGHUP)
	go func() {
		for range sig {
			e.reloadConfig() // sends a new config to configChan
		}
	}()

	go e.serveStatus()
	return e, nil
}
// NewSummary returns a new Histogram backed by a Prometheus summary. The
// histogram is automatically registered via prometheus.MustRegister.
//
// For more information on Prometheus histograms and summaries, refer to
// http://prometheus.io/docs/practices/histograms.
func NewSummary(opts prometheus.SummaryOpts, fieldKeys []string) metrics.Histogram {
	m := prometheus.NewSummaryVec(opts, fieldKeys)
	prometheus.MustRegister(m)
	return prometheusSummary{
		SummaryVec: m,
		Pairs:      pairsFrom(fieldKeys),
	}
}
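For context, the wrapper above reduces to this plain client_golang pattern; a minimal sketch, with a made-up metric name and label:

var requestDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{
	Name: "myapp_request_duration_seconds", // hypothetical name for illustration
	Help: "Request duration in seconds.",
}, []string{"method"})

func init() {
	prometheus.MustRegister(requestDuration)
}

func recordDuration(method string, seconds float64) {
	requestDuration.WithLabelValues(method).Observe(seconds)
}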
func main() {
	cfg, err := New()
	if err != nil {
		log.Fatalf("Failed to parse config: %s", err)
		return
	}

	runs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "elasticsearch_backup_runs_total",
			Help: "Number of elasticsearch backup runs",
		},
		[]string{"status"},
	)
	runs = prometheus.MustRegisterOrGet(runs).(*prometheus.CounterVec)

	duration := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "elasticsearch_backup_duration",
			Help: "Duration of elasticsearch backup runs",
		},
		[]string{"operation"},
	)
	duration = prometheus.MustRegisterOrGet(duration).(*prometheus.SummaryVec)

	go listen()

	interval := time.Hour * time.Duration(cfg.Interval)
	for {
		t0 := time.Now()
		opFunc := func() error {
			return backupAndRemove(cfg)
		}
		logFunc := func(err error, wait time.Duration) {
			log.Warnf("Failed to connect to ES: %s. Retry in %s", err, wait)
		}
		bo := backoff.NewExponentialBackOff()
		bo.InitialInterval = time.Second
		bo.MaxInterval = 60 * time.Second
		bo.MaxElapsedTime = 15 * time.Minute
		log.Infof("Attempting Snapshot ...")
		err := backoff.RetryNotify(opFunc, bo, logFunc)
		if err != nil {
			runs.WithLabelValues("failed").Inc()
			log.Warnf("Failed to delete snapshots: %s", err)
			continue
		}
		runs.WithLabelValues("ok").Inc()
		d0 := float64(time.Since(t0)) / float64(time.Microsecond)
		duration.WithLabelValues("backup").Observe(d0)
		if interval < time.Second {
			break
		}
		log.Infof("Waiting %s until next run", interval.String())
		time.Sleep(interval)
	}
	os.Exit(0)
}
func main() {
	var (
		listen      = flag.String("listen", ":7801", "Server listen address")
		delay       = flag.Duration("delay", 0, "Delay for responses")
		logRequests = flag.Bool("log.request", false, "logs http request info as JSON to stdout")

		requestDurations = prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Name:      "requests_duration_nanoseconds",
				Help:      "Amounts of time squirrel has spent answering requests in nanoseconds",
			},
			labelNames,
		)
	)

	flag.Parse()

	if *listen == "" {
		flag.Usage()
		os.Exit(1)
	}

	prometheus.MustRegister(requestDurations)

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		defer func(began time.Time, r *http.Request) {
			duration := float64(time.Since(began))
			labels := prometheus.Labels{
				"method": strings.ToLower(r.Method),
				"path":   r.URL.Path,
				// The handler below always answers 200, so the code label is constant.
				"code": strconv.Itoa(http.StatusOK),
			}
			for name, hdr := range eagleHeaders {
				v := r.Header.Get(hdr)
				if len(v) == 0 {
					v = "unknown"
				}
				labels[name] = v
			}
			requestDurations.With(labels).Observe(duration)
			if *logRequests {
				logRequest(r, began)
			}
		}(time.Now(), r)

		time.Sleep(*delay)
		fmt.Fprint(w, "OK")
	})
	http.Handle("/metrics", prometheus.Handler())

	log.Printf("Starting server on %s", *listen)
	log.Fatal(http.ListenAndServe(*listen, nil))
}
// New constructs a new Notifier.
func New(o *Options) *Notifier {
	ctx, cancel := context.WithCancel(context.Background())

	return &Notifier{
		queue:  make(model.Alerts, 0, o.QueueCapacity),
		ctx:    ctx,
		cancel: cancel,
		more:   make(chan struct{}, 1),
		opts:   o,

		latency: prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "latency_seconds",
			Help:      "Latency quantiles for sending alert notifications (not including dropped notifications).",
		},
			[]string{alertmanagerLabel},
		),
		errors: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "errors_total",
			Help:      "Total number of errors sending alert notifications.",
		},
			[]string{alertmanagerLabel},
		),
		sent: prometheus.NewCounterVec(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "sent_total",
			Help:      "Total number of alerts successfully sent.",
		},
			[]string{alertmanagerLabel},
		),
		dropped: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "dropped_total",
			Help:      "Total number of alerts dropped due to alert manager missing in configuration.",
		}),
		queueLength: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "queue_length",
			Help:      "The number of alert notifications in the queue.",
		}),
		queueCapacity: prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				prometheus.BuildFQName(namespace, subsystem, "queue_capacity"),
				"The capacity of the alert notifications queue.",
				nil, nil,
			),
			prometheus.GaugeValue,
			float64(o.QueueCapacity),
		),
	}
}
// NewNsqExecutor creates a new executor for the NSQ metrics.
func NewNsqExecutor(namespace string) *NsqExecutor {
	return &NsqExecutor{
		collectors: make(map[string]Collector),
		summary: prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Namespace: namespace,
			Subsystem: "exporter",
			Name:      "scrape_duration_seconds",
			Help:      "Duration of a scrape job of the NSQ exporter",
		}, []string{"collector", "result"}),
	}
}
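A sketch of how such a scrape summary is typically fed; the observeScrape method and its runCollector callback are hypothetical, but the label order matches the vector above:

func (e *NsqExecutor) observeScrape(collector string, runCollector func() error) {
	start := time.Now()
	result := "success"
	if err := runCollector(); err != nil {
		result = "error"
	}
	e.summary.WithLabelValues(collector, result).Observe(time.Since(start).Seconds())
}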
func (group *Group) getSummaryVec(name string, description string, labelNames []string) *prometheus.SummaryVec {
	summaryVec := group.SummaryVecs[name]
	if summaryVec == nil {
		summaryVec = prometheus.NewSummaryVec(prometheus.SummaryOpts{
			Namespace: "mongodb",
			Name:      name,
			Help:      description,
		}, labelNames)
		group.SummaryVecs[name] = summaryVec
	}
	return summaryVec
}
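Usage sketch (metric and label names are illustrative): repeated lookups under the same name return the cached vector, so observations from different call sites accumulate in a single summary. Note the helper only caches; registration presumably happens elsewhere in the Group.

opLatency := group.getSummaryVec("op_latency_milliseconds", "Operation latency.", []string{"op"})
opLatency.WithLabelValues("find").Observe(12.5)

// A later call with the same name reuses the cached *prometheus.SummaryVec.
opLatency = group.getSummaryVec("op_latency_milliseconds", "Operation latency.", []string{"op"})
opLatency.WithLabelValues("insert").Observe(3.2)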
func TestWriteSummary(t *testing.T) {
	sumVec := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:        "name",
			Help:        "docstring",
			ConstLabels: prometheus.Labels{"constname": "constvalue"},
			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"labelname"},
	)

	sumVec.WithLabelValues("val1").Observe(float64(10))
	sumVec.WithLabelValues("val1").Observe(float64(20))
	sumVec.WithLabelValues("val1").Observe(float64(30))
	sumVec.WithLabelValues("val2").Observe(float64(20))
	sumVec.WithLabelValues("val2").Observe(float64(30))
	sumVec.WithLabelValues("val2").Observe(float64(40))

	reg := prometheus.NewRegistry()
	reg.MustRegister(sumVec)

	mfs, err := reg.Gather()
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	now := model.Time(1477043083)
	var buf bytes.Buffer
	err = writeMetrics(&buf, mfs, "prefix", now)
	if err != nil {
		t.Fatalf("error: %v", err)
	}

	want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
`
	if got := buf.String(); want != got {
		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
	}
}
// helper
func NewSummaryVec(name string, help string) *prometheus.SummaryVec {
	// I still think that a histogram is the way to go!
	// because computation is taken away from gogrinder
	// but I find Summary is much nicer in Grafana
	//elapsed := prometheus.NewHistogramVec(prometheus.HistogramOpts{
	//	Name: "gogrinder_elapsed_ms",
	//	Help: "Current time elapsed of gogrinder teststep",
	//}, []string{"teststep"})
	//regElapsed := prometheus.MustRegisterOrGet(elapsed).(*prometheus.HistogramVec)
	return prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       name,
		Help:       help,
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},
	}, []string{"teststep"})
}
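A short usage sketch for the helper above; the teststep value is illustrative and registration is assumed to happen once at startup:

elapsed := NewSummaryVec("gogrinder_elapsed_ms", "Current time elapsed of gogrinder teststep")
prometheus.MustRegister(elapsed)

start := time.Now()
// ... run the teststep ...
elapsed.WithLabelValues("01_01_login").Observe(float64(time.Since(start)) / float64(time.Millisecond))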
func newRegistry(constLabels prometheus.Labels) *registry {
	var (
		latencies = prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace:   namespace,
				Name:        "request_durations_nanoseconds",
				Help:        "The total duration of HTTP requests (nanoseconds).",
				ConstLabels: constLabels,
			},
			labelNames,
		)
	)

	prometheus.MustRegister(latencies)

	return &registry{latencies}
}
type Reporter struct {
	hostID           string
	hostName         string
	includeProcesses bool
	includeNAT       bool
	conntracker      Conntracker
	natmapper        *NATMapper
	revResolver      *ReverseResolver
}

// SpyDuration is an exported prometheus metric
var SpyDuration = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Namespace: "scope",
		Subsystem: "probe",
		Name:      "spy_time_nanoseconds",
		Help:      "Total time spent spying on active connections.",
		MaxAge:    10 * time.Second, // like statsd
	},
	[]string{},
)

// NewReporter creates a new Reporter that invokes procspy.Connections to
// generate a report.Report that contains every discovered (spied) connection
// on the host machine, at the granularity of host and port. That information
// is stored in the Endpoint topology. It optionally enriches that topology
// with process (PID) information.
func NewReporter(hostID, hostName string, includeProcesses bool, useConntrack bool) *Reporter {
	var (
		conntrackModulePresent = ConntrackModulePresent()
		conntracker            Conntracker
		natmapper              NATMapper
// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
// has to be called to start the storage.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) *MemorySeriesStorage {
	s := &MemorySeriesStorage{
		fpLocker: newFingerprintLocker(o.NumMutexes),

		options: o,

		loopStopping:               make(chan struct{}),
		loopStopped:                make(chan struct{}),
		logThrottlingStopped:       make(chan struct{}),
		throttled:                  make(chan struct{}, 1),
		maxMemoryChunks:            o.MemoryChunks,
		dropAfter:                  o.PersistenceRetentionPeriod,
		checkpointInterval:         o.CheckpointInterval,
		checkpointDirtySeriesLimit: o.CheckpointDirtySeriesLimit,
		archiveHighWatermark:       model.Now().Add(-headChunkTimeout),

		maxChunksToPersist: o.MaxChunksToPersist,

		evictList:     list.New(),
		evictRequests: make(chan evictRequest, evictRequestsCap),
		evictStopping: make(chan struct{}),
		evictStopped:  make(chan struct{}),

		quarantineRequests: make(chan quarantineRequest, quarantineRequestsCap),
		quarantineStopping: make(chan struct{}),
		quarantineStopped:  make(chan struct{}),

		persistErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_errors_total",
			Help:      "The total number of errors while persisting chunks.",
		}),
		numSeries: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "memory_series",
			Help:      "The current number of series in memory.",
		}),
		seriesOps: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "series_ops_total",
				Help:      "The total number of series operations by their type.",
			},
			[]string{opTypeLabel},
		),
		ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "ingested_samples_total",
			Help:      "The total number of samples ingested.",
		}),
		discardedSamplesCount: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "out_of_order_samples_total",
				Help:      "The total number of samples that were discarded because their timestamps were at or before the last received sample for a series.",
			},
			[]string{discardReasonLabel},
		),
		nonExistentSeriesMatchesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "non_existent_series_matches_total",
			Help:      "How often a non-existent series was referred to during label matching or chunk preloading. This is an indication of outdated label indexes.",
		}),
		maintainSeriesDuration: prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "maintain_series_duration_seconds",
				Help:      "The duration in seconds it took to perform maintenance on a series.",
			},
			[]string{seriesLocationLabel},
		),
		persistenceUrgencyScore: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persistence_urgency_score",
			Help:      "A score of urgency to persist chunks, 0 is least urgent, 1 most.",
		}),
		rushedMode: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "rushed_mode",
			Help:      "1 if the storage is in rushed mode, 0 otherwise. In rushed mode, the system behaves as if the persistence_urgency_score is 1.",
		}),
	}

	// Initialize metric vectors.
	// TODO(beorn7): Rework once we have a utility function for it in client_golang.
	s.discardedSamplesCount.WithLabelValues(outOfOrderTimestamp)
	s.discardedSamplesCount.WithLabelValues(duplicateSample)
	s.maintainSeriesDuration.WithLabelValues(maintainInMemory)
	s.maintainSeriesDuration.WithLabelValues(maintainArchived)
	s.seriesOps.WithLabelValues(create)
	s.seriesOps.WithLabelValues(archive)
	s.seriesOps.WithLabelValues(unarchive)
	s.seriesOps.WithLabelValues(memoryPurge)
	s.seriesOps.WithLabelValues(archivePurge)
	s.seriesOps.WithLabelValues(requestedPurge)
	s.seriesOps.WithLabelValues(memoryMaintenance)
	s.seriesOps.WithLabelValues(archiveMaintenance)
	s.seriesOps.WithLabelValues(completedQurantine)
	s.seriesOps.WithLabelValues(droppedQuarantine)
	s.seriesOps.WithLabelValues(failedQuarantine)

	return s
}
const (
	scrapeHealthMetricName   = "up"
	scrapeDurationMetricName = "scrape_duration_seconds"

	// Constants for instrumentation.
	namespace = "prometheus"
	interval  = "interval"
	scrapeJob = "scrape_job"
)

var (
	targetIntervalLength = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace:  namespace,
			Name:       "target_interval_length_seconds",
			Help:       "Actual intervals between scrapes.",
			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
		},
		[]string{interval},
	)
	targetSkippedScrapes = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "target_skipped_scrapes_total",
			Help:      "Total number of scrapes that were skipped because the metric storage was throttled.",
		},
		[]string{interval},
	)
	targetReloadIntervalLength = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: namespace,
// Buildtime variables
var (
	Program = "ent"
	Commit  = "0000000"
	Version = "0.0.0"
)

// Telemetry
var (
	labelNames       = []string{"bucket", "method", "operation", "status"}
	requestDurations = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: Program,
			Name:      "requests_duration_nanoseconds",
			Help:      "Amounts of time ent has spent answering requests in nanoseconds.",
		},
		labelNames,
	)
	// Note that the summary 'requestDurations' above will result in metrics
	// 'ent_requests_duration_nanoseconds_count' and
	// 'ent_requests_duration_nanoseconds_sum', counting the total number of
	// requests made and summing up the total amount of time ent has spent
	// to answer requests, respectively.
	requestBytes = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: Program,
			Name:      "request_bytes_total",
			Help:      "Total volume of request payloads emitted in bytes.",
		},
		labelNames,
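To make the note above concrete: every Observe call adds one to ent_requests_duration_nanoseconds_count and adds the observed value to ent_requests_duration_nanoseconds_sum. A minimal sketch of feeding the summary (the label values here are illustrative):

began := time.Now()
// ... answer the request ...
requestDurations.WithLabelValues("photos", "GET", "fetch", "200").Observe(float64(time.Since(began)))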
log "github.com/Sirupsen/logrus" "github.com/pandemicsyn/node_exporter/collector" "github.com/prometheus/client_golang/prometheus" ) const ( DefaultCollectors = "cpu,diskstats,entropy,filefd,filesystem,loadavg,meminfo,netdev,netstat,sockstat,stat,textfile,time,uname,version,vmstat" ) var ( scrapeDurations = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Namespace: collector.Namespace, Subsystem: "exporter", Name: "scrape_duration_seconds", Help: "node_exporter: Duration of a scrape job.", }, []string{"collector", "result"}, ) ) func New(collectors map[string]collector.Collector) NodeCollector { return NodeCollector{collectors: collectors} } // NodeCollector implements the prometheus.Collector interface. type NodeCollector struct { collectors map[string]collector.Collector }
// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
// has to be called to start the storage.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) Storage {
	s := &memorySeriesStorage{
		fpLocker: newFingerprintLocker(1024),

		options: o,

		loopStopping:               make(chan struct{}),
		loopStopped:                make(chan struct{}),
		maxMemoryChunks:            o.MemoryChunks,
		dropAfter:                  o.PersistenceRetentionPeriod,
		checkpointInterval:         o.CheckpointInterval,
		checkpointDirtySeriesLimit: o.CheckpointDirtySeriesLimit,

		maxChunksToPersist: o.MaxChunksToPersist,

		evictList:     list.New(),
		evictRequests: make(chan evictRequest, evictRequestsCap),
		evictStopping: make(chan struct{}),
		evictStopped:  make(chan struct{}),

		persistErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_errors_total",
			Help:      "The total number of errors while persisting chunks.",
		}),
		numSeries: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "memory_series",
			Help:      "The current number of series in memory.",
		}),
		seriesOps: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "series_ops_total",
				Help:      "The total number of series operations by their type.",
			},
			[]string{opTypeLabel},
		),
		ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "ingested_samples_total",
			Help:      "The total number of samples ingested.",
		}),
		outOfOrderSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "out_of_order_samples_total",
			Help:      "The total number of samples that were discarded because their timestamps were at or before the last received sample for a series.",
		}),
		invalidPreloadRequestsCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "invalid_preload_requests_total",
			Help:      "The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.",
		}),
		maintainSeriesDuration: prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "maintain_series_duration_milliseconds",
				Help:      "The duration (in milliseconds) it took to perform maintenance on a series.",
			},
			[]string{seriesLocationLabel},
		),
	}

	return s
}
	cacheGetLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Name: "etcd_request_cache_get_latencies_summary",
			Help: "Latency in microseconds of getting an object from etcd cache",
		},
	)
	cacheAddLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Name: "etcd_request_cache_add_latencies_summary",
			Help: "Latency in microseconds of adding an object to etcd cache",
		},
	)
	etcdRequestLatenciesSummary = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "etcd_request_latencies_summary",
			Help: "Etcd request latency summary in microseconds for each operation and object type.",
		},
		[]string{"operation", "type"},
	)
)

var registerMetrics sync.Once

// Register all metrics.
func Register() {
	// Register the metrics.
	registerMetrics.Do(func() {
		prometheus.MustRegister(cacheHitCounter)
		prometheus.MustRegister(cacheMissCounter)
		prometheus.MustRegister(cacheEntryCounter)
		prometheus.MustRegister(cacheAddLatency)
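An observation sketch matching the microsecond unit in the Help strings above; the operation and type label values are illustrative:

startTime := time.Now()
// ... perform the etcd request ...
etcdRequestLatenciesSummary.WithLabelValues("get", "pods").Observe(
	float64(time.Since(startTime)) / float64(time.Microsecond))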
	// Capacity of the channel to buffer samples during ingestion.
	ingestedSamplesCap = 256

	// Constants for instrumentation.
	namespace = "prometheus"
	interval  = "interval"
)

var (
	errIngestChannelFull = errors.New("ingestion channel full")

	targetIntervalLength = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace:  namespace,
			Name:       "target_interval_length_seconds",
			Help:       "Actual intervals between scrapes.",
			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
		},
		[]string{interval},
	)
)

func init() {
	prometheus.MustRegister(targetIntervalLength)
}

// TargetHealth describes the health state of a target.
type TargetHealth int

func (t TargetHealth) String() string {
	switch t {
"github.com/prometheus/client_golang/prometheus" "k8s.io/heapster/events/core" ) const ( DefaultSinkExportEventsTimeout = 20 * time.Second DefaultSinkStopTimeout = 60 * time.Second ) var ( // Time spent exporting events to sink in microseconds. exporterDuration = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Namespace: "eventer", Subsystem: "exporter", Name: "duration_microseconds", Help: "Time spent exporting events to sink in microseconds.", }, []string{"exporter"}, ) ) func init() { prometheus.MustRegister(exporterDuration) } type sinkHolder struct { sink core.EventSink eventBatchChannel chan *core.EventBatch stopChannel chan bool }
)

// Constants for instrumentation.
const (
	namespace = "prometheus"

	ruleTypeLabel     = "rule_type"
	ruleTypeAlerting  = "alerting"
	ruleTypeRecording = "recording"
)

var (
	evalDuration = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: namespace,
			Name:      "rule_evaluation_duration_milliseconds",
			Help:      "The duration for a rule to execute.",
		},
		[]string{ruleTypeLabel},
	)
	evalFailures = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "rule_evaluation_failures_total",
			Help:      "The total number of rule evaluation failures.",
		},
	)
	iterationDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Namespace:  namespace,
		Name:       "evaluator_duration_milliseconds",
		Help:       "The duration for all evaluations to execute.",
		Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
"github.com/openshift/origin/pkg/cmd/util/clientcmd" "github.com/openshift/origin/pkg/util/proc" ) var ( observeCounts = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "observe_counts", Help: "Number of changes observed to the underlying resource.", }, []string{"type"}, ) execDurations = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "observe_exec_durations_milliseconds", Help: "Item execution latency distributions.", }, []string{"type", "exit_code"}, ) nameExecDurations = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "observe_name_exec_durations_milliseconds", Help: "Name list execution latency distributions.", }, []string{"exit_code"}, ) ) var ( observeLong = templates.LongDesc(` Observe changes to resources and take action on them
package rafthttp

import (
	"time"

	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/prometheus/client_golang/prometheus"
)

var (
	msgSentDuration = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: "etcd",
			Subsystem: "rafthttp",
			Name:      "message_sent_latency_microseconds",
			Help:      "message sent latency distributions.",
		},
		[]string{"sendingType", "remoteID", "msgType"},
	)
	msgSentFailed = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "rafthttp",
		Name:      "message_sent_failed_total",
		Help:      "The total number of failed messages sent.",
	},
		[]string{"sendingType", "remoteID", "msgType"},
	)
)
	DockerOperationsKey = "docker_operations_latency_microseconds"
	DockerErrorsKey     = "docker_errors"
)

var (
	ContainersPerPodCount = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name:      "containers_per_pod_count",
			Help:      "The number of containers per pod.",
		},
	)
	PodWorkerLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name:      PodWorkerLatencyKey,
			Help:      "Latency in microseconds to sync a single pod. Broken down by operation type: create, update, or sync",
		},
		[]string{"operation_type"},
	)
	SyncPodsLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name:      SyncPodsLatencyKey,
			Help:      "Latency in microseconds to sync all pods.",
		},
	)
	PodStartLatency = prometheus.NewSummary(
		prometheus.SummaryOpts{
			Subsystem: KubeletSubsystem,
			Name:      PodStartLatencyKey,
			Help:      "Latency in microseconds for a single pod to go from pending to running. Broken down by podname.",
func ExampleSummaryVec() {
	temps := prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name: "pond_temperature_celsius",
			Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
		},
		[]string{"species"},
	)

	// Simulate some observations.
	for i := 0; i < 1000; i++ {
		temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
		temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)
	}

	// Just for demonstration, let's check the state of the summary vector
	// by (ab)using its Collect method and the Write method of its elements
	// (which is usually only used by Prometheus internally - code like the
	// following will never appear in your own code).
	metricChan := make(chan prometheus.Metric)
	go func() {
		defer close(metricChan)
		temps.Collect(metricChan)
	}()

	metricStrings := []string{}
	for metric := range metricChan {
		dtoMetric := &dto.Metric{}
		metric.Write(dtoMetric)
		metricStrings = append(metricStrings, proto.MarshalTextString(dtoMetric))
	}
	sort.Strings(metricStrings) // For reproducible print order.
	fmt.Println(metricStrings)

	// Output:
	// [label: <
	//   name: "species"
	//   value: "lithobates-catesbeianus"
	// >
	// summary: <
	//   sample_count: 1000
	//   sample_sum: 31956.100000000017
	//   quantile: <
	//     quantile: 0.5
	//     value: 32.4
	//   >
	//   quantile: <
	//     quantile: 0.9
	//     value: 41.4
	//   >
	//   quantile: <
	//     quantile: 0.99
	//     value: 41.9
	//   >
	// >
	//  label: <
	//   name: "species"
	//   value: "litoria-caerulea"
	// >
	// summary: <
	//   sample_count: 1000
	//   sample_sum: 29969.50000000001
	//   quantile: <
	//     quantile: 0.5
	//     value: 31.1
	//   >
	//   quantile: <
	//     quantile: 0.9
	//     value: 41.3
	//   >
	//   quantile: <
	//     quantile: 0.99
	//     value: 41.9
	//   >
	// >
	// ]
}
	memProfile        = flag.String("debug.memprofile-file", "", "Write memory profile to this file upon receipt of SIGUSR1.")
	listenAddress     = flag.String("web.listen-address", ":9100", "Address on which to expose metrics and web interface.")
	metricsPath       = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
	enabledCollectors = flag.String("collectors.enabled", "diskstats,filefd,filesystem,loadavg,mdadm,meminfo,netdev,netstat,sockstat,stat,textfile,time,uname", "Comma-separated list of collectors to use.")
	printCollectors   = flag.Bool("collectors.print", false, "If true, print available collectors and exit.")
	authUser          = flag.String("auth.user", "", "Username for basic auth.")
	authPass          = flag.String("auth.pass", "", "Password for basic auth.")

	collectorLabelNames = []string{"collector", "result"}

	scrapeDurations = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: collector.Namespace,
			Subsystem: subsystem,
			Name:      "scrape_duration_seconds",
			Help:      "node_exporter: Duration of a scrape job.",
		},
		collectorLabelNames,
	)
)

// NodeCollector implements the prometheus.Collector interface.
type NodeCollector struct {
	collectors map[string]collector.Collector
}

// Describe implements the prometheus.Collector interface.
func (n NodeCollector) Describe(ch chan<- *prometheus.Desc) {
	scrapeDurations.Describe(ch)
}
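A condensed sketch of the companion Collect method, assuming (as in node_exporter) that collector.Collector exposes an Update(ch chan<- prometheus.Metric) error method:

// Collect runs each sub-collector, records its outcome in scrapeDurations,
// and finally emits the summary itself.
func (n NodeCollector) Collect(ch chan<- prometheus.Metric) {
	for name, c := range n.collectors {
		begin := time.Now()
		result := "success"
		if err := c.Update(ch); err != nil {
			result = "error"
		}
		scrapeDurations.WithLabelValues(name, result).Observe(time.Since(begin).Seconds())
	}
	scrapeDurations.Collect(ch)
}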
// NewMemorySeriesStorage returns a newly allocated Storage. Storage.Serve still
// has to be called to start the storage.
func NewMemorySeriesStorage(o *MemorySeriesStorageOptions) Storage {
	s := &memorySeriesStorage{
		fpLocker: newFingerprintLocker(1024),

		options: o,

		loopStopping:               make(chan struct{}),
		loopStopped:                make(chan struct{}),
		logThrottlingStopped:       make(chan struct{}),
		throttled:                  make(chan struct{}, 1),
		maxMemoryChunks:            o.MemoryChunks,
		dropAfter:                  o.PersistenceRetentionPeriod,
		checkpointInterval:         o.CheckpointInterval,
		checkpointDirtySeriesLimit: o.CheckpointDirtySeriesLimit,
		archiveHighWatermark:       model.Now().Add(-headChunkTimeout),

		maxChunksToPersist: o.MaxChunksToPersist,

		evictList:     list.New(),
		evictRequests: make(chan evictRequest, evictRequestsCap),
		evictStopping: make(chan struct{}),
		evictStopped:  make(chan struct{}),

		quarantineRequests: make(chan quarantineRequest, quarantineRequestsCap),
		quarantineStopping: make(chan struct{}),
		quarantineStopped:  make(chan struct{}),

		persistErrors: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persist_errors_total",
			Help:      "The total number of errors while persisting chunks.",
		}),
		numSeries: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "memory_series",
			Help:      "The current number of series in memory.",
		}),
		seriesOps: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "series_ops_total",
				Help:      "The total number of series operations by their type.",
			},
			[]string{opTypeLabel},
		),
		ingestedSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "ingested_samples_total",
			Help:      "The total number of samples ingested.",
		}),
		outOfOrderSamplesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "out_of_order_samples_total",
			Help:      "The total number of samples that were discarded because their timestamps were at or before the last received sample for a series.",
		}),
		nonExistentSeriesMatchesCount: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "non_existent_series_matches_total",
			Help:      "How often a non-existent series was referred to during label matching or chunk preloading. This is an indication of outdated label indexes.",
		}),
		maintainSeriesDuration: prometheus.NewSummaryVec(
			prometheus.SummaryOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      "maintain_series_duration_milliseconds",
				Help:      "The duration (in milliseconds) it took to perform maintenance on a series.",
			},
			[]string{seriesLocationLabel},
		),
		persistenceUrgencyScore: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "persistence_urgency_score",
			Help:      "A score of urgency to persist chunks, 0 is least urgent, 1 most.",
		}),
		rushedMode: prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "rushed_mode",
			Help:      "1 if the storage is in rushed mode, 0 otherwise. In rushed mode, the system behaves as if the persistence_urgency_score is 1.",
		}),
	}

	return s
}
package metrics

import (
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

const restClientSubsystem = "rest_client"

var (
	RequestLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Subsystem: restClientSubsystem,
			Name:      "request_latency_microseconds",
			Help:      "Request latency in microseconds. Broken down by verb and URL",
		},
		[]string{"verb", "url"},
	)
)

var registerMetrics sync.Once

// Register all metrics.
func Register() {
	// Register the metrics.
	registerMetrics.Do(func() {
		prometheus.MustRegister(RequestLatency)
	})
}
}, []string{"verb", "resource", "client", "code"}, ) requestLatencies = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "apiserver_request_latencies", Help: "Response latency distribution in microseconds for each verb, resource and client.", // Use buckets ranging from 125 ms to 8 seconds. Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7), }, []string{"verb", "resource"}, ) requestLatenciesSummary = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "apiserver_request_latencies_summary", Help: "Response latency summary in microseconds for each verb and resource.", }, []string{"verb", "resource"}, ) ) // Register all metrics. func Register() { prometheus.MustRegister(requestCounter) prometheus.MustRegister(requestLatencies) prometheus.MustRegister(requestLatenciesSummary) } func Monitor(verb, resource *string, client string, httpCode *int, reqStart time.Time) { requestCounter.WithLabelValues(*verb, *resource, client, strconv.Itoa(*httpCode)).Inc() requestLatencies.WithLabelValues(*verb, *resource).Observe(float64((time.Since(reqStart)) / time.Microsecond))
const (
	infraContainerName = "POD"
	// TODO: following constants are copied from k8s, change to use them directly
	kubernetesPodNameLabel      = "io.kubernetes.pod.name"
	kubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
	kubernetesPodUID            = "io.kubernetes.pod.uid"
	kubernetesContainerLabel    = "io.kubernetes.container.name"
)

var (
	// The Kubelet request latencies in microseconds.
	kubeletRequestLatency = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Namespace: "heapster",
			Subsystem: "kubelet",
			Name:      "request_duration_microseconds",
			Help:      "The Kubelet request latencies in microseconds.",
		},
		[]string{"node"},
	)
)

func init() {
	prometheus.MustRegister(kubeletRequestLatency)
}

// Kubelet-provided metrics for pod and system container.
type kubeletMetricsSource struct {
	host          Host
	kubeletClient *KubeletClient
	nodename      string
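A minimal sketch of how this summary is meant to be fed; observeKubeletRequest and its fetch callback are hypothetical stand-ins for the real kubelet client call:

func observeKubeletRequest(node string, fetch func() error) error {
	start := time.Now()
	err := fetch()
	kubeletRequestLatency.WithLabelValues(node).Observe(
		float64(time.Since(start)) / float64(time.Microsecond))
	return err
}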