func main() { var ( fsRoot = flag.String("fs.root", "/tmp", "FileSystem root directory") httpAddress = flag.String("http.addr", ":5555", "HTTP listen address") providerDir = flag.String("provider.dir", "/tmp", "Provider directory with bucket policies") ) flag.Parse() prometheus.Register("ent_requests_total", "Total number of requests made", prometheus.NilLabels, requestTotal) prometheus.Register("ent_requests_duration_nanoseconds_total", "Total amount of time ent has spent to answer requests in nanoseconds", prometheus.NilLabels, requestDuration) prometheus.Register("ent_requests_duration_nanoseconds", "Amounts of time ent has spent answering requests in nanoseconds", prometheus.NilLabels, requestDurations) prometheus.Register("ent_request_bytes_total", "Total volume of request payloads emitted in bytes", prometheus.NilLabels, requestBytes) prometheus.Register("ent_response_bytes_total", "Total volume of response payloads emitted in bytes", prometheus.NilLabels, responseBytes) p, err := NewDiskProvider(*providerDir) if err != nil { log.Fatal(err) } fs := NewDiskFS(*fsRoot) r := pat.New() r.Get(fileRoute, handleGet(p, fs)) r.Post(fileRoute, handleCreate(p, fs)) r.Handle("/metrics", prometheus.DefaultRegistry.Handler()) r.Get("/", handleBucketList(p)) log.Fatal(http.ListenAndServe(*httpAddress, http.Handler(r))) }
// Reusable function for pushing metrics to prometheus. Handles initialization and so on. func promPushRunningPending(running, pending int) error { if TestContext.PrometheusPushGateway == "" { return nil } else { // Register metrics if necessary if !prom_registered && TestContext.PrometheusPushGateway != "" { prometheus.Register(runningMetric) prometheus.Register(pendingMetric) prom_registered = true } // Update metric values runningMetric.Set(float64(running)) pendingMetric.Set(float64(pending)) // Push them to the push gateway. This will be scraped by prometheus // provided you launch it with the pushgateway as an endpoint. if err := prometheus.Push( "e2e", "none", TestContext.PrometheusPushGateway, //i.e. "127.0.0.1:9091" ); err != nil { fmt.Println("failed at pushing to pushgateway ", err) return err } } return nil }
func ExampleCounter() { pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ Name: "repository_pushes", // Note: No help string... }) err := prometheus.Register(pushCounter) // ... so this will return an error. if err != nil { fmt.Println("Push counter couldn't be registered, no counting will happen:", err) return } // Try it once more, this time with a help string. pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ Name: "repository_pushes", Help: "Number of pushes to external repository.", }) err = prometheus.Register(pushCounter) if err != nil { fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) return } pushComplete := make(chan struct{}) // TODO: Start a goroutine that performs repository pushes and reports // each completion via the channel. for _ = range pushComplete { pushCounter.Inc() } // Output: // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string }
// init registers the elasticsearch metacluster (esmc) request metrics with
// the default prometheus registry. This uses the legacy client API where
// Register takes (name, help, baseLabels, metric); the metric values
// (requestCount etc.) are declared elsewhere in the package.
func init() {
	prometheus.Register(
		"esmc_requests",
		"A counter of the total number of requests to an ES cluster",
		prometheus.NilLabels,
		requestCount,
	)
	prometheus.Register(
		"esmc_request_total_duration_nanoseconds",
		"The total amount of time spent executing requests (nanoseconds)",
		prometheus.NilLabels,
		requestDuration,
	)
	prometheus.Register(
		"esmc_request_durations_nanoseconds",
		"The amounts of time spent executing requests (nanoseconds)",
		prometheus.NilLabels,
		requestDurations,
	)
	prometheus.Register(
		"esmc_reported_request_total_duration_nanoseconds",
		"The total amount of time spent executing requests as reported by elasticsearch (nanoseconds)",
		prometheus.NilLabels,
		reportedRequestDuration,
	)
	prometheus.Register(
		"esmc_reported_request_durations_nanoseconds",
		"The amounts of time spent executing requests as reported by elasticsearch (nanoseconds)",
		prometheus.NilLabels,
		reportedRequestDurations,
	)
}
func (m *measurer) Init() error { if err := prometheus.Register(m.registry.total); err != nil { return err } if err := prometheus.Register(m.registry.errors); err != nil { return err } if err := prometheus.Register(m.registry.duration); err != nil { return err } return nil }
func registerMetrics() (err error) { items, err := muninList() if err != nil { return } for _, name := range items { graphs = append(graphs, name) configs, graphConfig, err := muninConfig(name) if err != nil { return err } for metric, config := range configs { metricName := strings.Replace(name+"_"+metric, "-", "_", -1) desc := graphConfig["graph_title"] + ": " + config["label"] if config["info"] != "" { desc = desc + ", " + config["info"] } muninType := strings.ToLower(config["type"]) // muninType can be empty and defaults to gauge if muninType == "counter" || muninType == "derive" { gv := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: metricName, Help: desc, ConstLabels: prometheus.Labels{"type": muninType}, }, []string{"hostname", "graphname", "muninlabel"}, ) //log.Printf("Registered counter %s: %s", metricName, desc) counterPerMetric[metricName] = gv prometheus.Register(gv) } else { gv := prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: metricName, Help: desc, ConstLabels: prometheus.Labels{"type": "gauge"}, }, []string{"hostname", "graphname", "muninlabel"}, ) //log.Printf("Registered gauge %s: %s", metricName, desc) gaugePerMetric[metricName] = gv prometheus.Register(gv) } } } return nil }
func (e *exporter) setMetric(name string, labels map[string]string, metric Metric) { debug("%s{%s} = %f", name, labels, metric.Value) e.Lock() defer e.Unlock() if _, ok := e.Metrics[name]; !ok { var desc string var title string for _, element := range metric.ExtraData.ExtraElements { switch element.Name { case "DESC": desc = element.Val case "TITLE": title = element.Val } if title != "" && desc != "" { break } } debug("New metric: %s (%s)", name, desc) gv := prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: name, Help: desc, }, prometheusLabels, ) e.Metrics[name] = gv prometheus.Register(gv) // One GaugeVec per metric! } e.Metrics[name].With(labels).Set(metric.Value) }
func metricsInit(servicename string) *syndicateMetrics { m := syndicateMetrics{} m.managedNodes = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "ManagedNodes", Help: "Current number of nodes managed.", ConstLabels: prometheus.Labels{"servicename": servicename}, }) m.subscriberNodes = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "SubscriberNodes", Help: "Current number of unmanaged nodes subscribed for ring changes.", ConstLabels: prometheus.Labels{"servicename": servicename}, }) prometheus.Register(m.managedNodes) prometheus.Register(m.subscriberNodes) return &m }
// registerMetrics queries munin for its list of graphs and registers one
// GaugeVec per (graph, metric) pair with the default prometheus registry.
// Returns the first error encountered while talking to munin.
func registerMetrics() (err error) {
	items, err := muninList()
	if err != nil {
		// Naked return propagates the muninList error via the named result.
		return
	}
	for _, name := range items {
		graphs = append(graphs, name)
		configs, graphConfig, err := muninConfig(name)
		if err != nil {
			return err
		}
		for metric, config := range configs {
			// NOTE(review): "-" is not a legal character in prometheus metric
			// names, so registration below is likely to fail for every such
			// name — and the error from prometheus.Register at the bottom of
			// this loop is silently discarded. A sibling implementation in
			// this codebase sanitizes "-" to "_"; confirm which is intended.
			metricName := name + "-" + metric
			desc := graphConfig["graph_title"] + ": " + config["label"]
			if config["info"] != "" {
				desc = desc + ", " + config["info"]
			}
			gv := prometheus.NewGaugeVec(
				prometheus.GaugeOpts{
					Name: metricName,
					Help: desc,
				},
				[]string{"hostname"},
			)
			log.Printf("Registered %s: %s", metricName, desc)
			gaugePerMetric[metricName] = gv
			prometheus.Register(gv)
		}
	}
	return nil
}
// New returns a new unconfigured server. parentAddr is the address of // a parent, pass the empty string to create a root server. This // function should be called only once, as it registers metrics. func New(ctx context.Context, id string, parentAddr string, leader election.Election, opts ...connection.Option) (*Server, error) { s, err := NewIntermediate(ctx, id, parentAddr, leader, opts...) if err != nil { return nil, err } return s, prometheus.Register(s) }
// init registers the storage/curation instrumentation with the default
// prometheus registry. This uses the legacy client API where Register takes
// (name, help, baseLabels, metric); the metric values themselves are
// declared elsewhere in the package.
func init() {
	prometheus.Register("prometheus_metric_disk_operations_total", "Total number of metric-related disk operations.", prometheus.NilLabels, storageOperations)
	prometheus.Register("prometheus_metric_disk_latency_microseconds", "Latency for metric disk operations in microseconds.", prometheus.NilLabels, storageLatency)
	prometheus.Register("prometheus_storage_operation_time_total_microseconds", "The total time spent performing a given storage operation.", prometheus.NilLabels, storageOperationDurations)
	prometheus.Register("prometheus_storage_queue_sizes_total", "The various sizes and capacities of the storage queues.", prometheus.NilLabels, queueSizes)
	prometheus.Register("prometheus_curation_filter_operations_total", "The number of curation filter operations completed.", prometheus.NilLabels, curationFilterOperations)
	prometheus.Register("prometheus_curation_duration_ms_total", "The total time spent in curation (ms).", prometheus.NilLabels, curationDuration)
	prometheus.Register("prometheus_curation_durations_ms", "Histogram of time spent in curation (ms).", prometheus.NilLabels, curationDurations)
	prometheus.Register("prometheus_stored_samples_total", "The number of samples that have been stored.", prometheus.NilLabels, storedSamplesCount)
}
// NewSensors creates new sensors from a raw config func NewSensors(raw []interface{}) ([]*Sensor, error) { var sensors []*Sensor if err := utils.DecodeRaw(raw, &sensors); err != nil { return nil, fmt.Errorf("Sensor configuration error: %v", err) } for _, s := range sensors { check, err := commands.NewCommand(s.CheckExec, s.Timeout) if err != nil { return nil, fmt.Errorf("could not parse check in sensor %s: %s", s.Name, err) } check.Name = fmt.Sprintf("%s.sensor", s.Name) s.checkCmd = check // the prometheus client lib's API here is baffling... they don't expose // an interface or embed their Opts type in each of the Opts "subtypes", // so we can't share the initialization. switch { case s.Type == "counter": s.collector = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: s.Namespace, Subsystem: s.Subsystem, Name: s.Name, Help: s.Help, }) case s.Type == "gauge": s.collector = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: s.Namespace, Subsystem: s.Subsystem, Name: s.Name, Help: s.Help, }) case s.Type == "histogram": s.collector = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: s.Namespace, Subsystem: s.Subsystem, Name: s.Name, Help: s.Help, }) case s.Type == "summary": s.collector = prometheus.NewSummary(prometheus.SummaryOpts{ Namespace: s.Namespace, Subsystem: s.Subsystem, Name: s.Name, Help: s.Help, }) default: return nil, fmt.Errorf("invalid sensor type: %s", s.Type) } // we're going to unregister before every attempt to register // so that we can reload config prometheus.Unregister(s.collector) if err := prometheus.Register(s.collector); err != nil { return nil, err } } return sensors, nil }
func (_ prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { adds := prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: name, Name: "adds", Help: "Total number of adds handled by workqueue: " + name, }) prometheus.Register(adds) return adds }
func (_ prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { retries := prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: name, Name: "retries", Help: "Total number of retries handled by workqueue: " + name, }) prometheus.Register(retries) return retries }
func (_ prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.SummaryMetric { latency := prometheus.NewSummary(prometheus.SummaryOpts{ Subsystem: name, Name: "queue_latency", Help: "How long an item stays in workqueue" + name + " before being requested.", }) prometheus.Register(latency) return latency }
func (_ prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric { workDuration := prometheus.NewSummary(prometheus.SummaryOpts{ Subsystem: name, Name: "work_duration", Help: "How long processing an item from workqueue" + name + " takes.", }) prometheus.Register(workDuration) return workDuration }
func (_ prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { depth := prometheus.NewGauge(prometheus.GaugeOpts{ Subsystem: name, Name: "depth", Help: "Current depth of workqueue: " + name, }) prometheus.Register(depth) return depth }
func main() { fs := flag.NewFlagSet("mesos-exporter", flag.ExitOnError) addr := fs.String("addr", ":9110", "Address to listen on") masterURL := fs.String("master", "", "Expose metrics from master running on this URL") slaveURL := fs.String("slave", "", "Expose metrics from slave running on t his URL") timeout := fs.Duration("timeout", 5*time.Second, "Master polling timeout") fs.Parse(os.Args[1:]) if *masterURL != "" && *slaveURL != "" { log.Fatal("Only -master or -slave can be given at a time") } switch { case *masterURL != "": for _, c := range []prometheus.Collector{ newMasterCollector(*masterURL, *timeout), newMasterStateCollector(*masterURL, *timeout), } { if err := prometheus.Register(c); err != nil { log.Fatal(err) } } log.Printf("Exposing master metrics on %s", *addr) case *slaveURL != "": for _, c := range []prometheus.Collector{ newSlaveCollector(*slaveURL, *timeout), newSlaveMonitorCollector(*slaveURL, *timeout), } { if err := prometheus.Register(c); err != nil { log.Fatal(err) } } log.Printf("Exposing slave metrics on %s", *addr) default: log.Fatal("Either -master or -slave is required") } http.Handle("/metrics", prometheus.Handler()) if err := http.ListenAndServe(*addr, nil); err != nil { log.Fatal(err) } }
func (n *NGINXController) setupMonitor(args []string) { pc, err := newProcessCollector(true, exeMatcher{"nginx", args}) if err != nil { glog.Fatalf("unexpected error registering nginx collector: %v", err) } err = prometheus.Register(pc) if err != nil { glog.Warningf("unexpected error registering nginx collector: %v", err) } }
func TestRegisterWithOrGet(t *testing.T) { // Replace the default registerer just to be sure. This is bad, but this // whole test will go away once RegisterOrGet is removed. oldRegisterer := prometheus.DefaultRegisterer defer func() { prometheus.DefaultRegisterer = oldRegisterer }() prometheus.DefaultRegisterer = prometheus.NewRegistry() original := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test", Help: "help", }, []string{"foo", "bar"}, ) equalButNotSame := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test", Help: "help", }, []string{"foo", "bar"}, ) var err error if err = prometheus.Register(original); err != nil { t.Fatal(err) } if err = prometheus.Register(equalButNotSame); err == nil { t.Fatal("expected error when registringe equal collector") } if are, ok := err.(prometheus.AlreadyRegisteredError); ok { if are.ExistingCollector != original { t.Error("expected original collector but got something else") } if are.ExistingCollector == equalButNotSame { t.Error("expected original callector but got new one") } } else { t.Error("unexpected error:", err) } }
// EnableHandlingTimeHistogram turns on recording of handling time of RPCs for server-side interceptors. // Histogram metrics can be very expensive for Prometheus to retain and query. func EnableHandlingTimeHistogram(opts ...HistogramOption) { for _, o := range opts { o(&serverHandledHistogramOpts) } if !serverHandledHistogramEnabled { serverHandledHistogram = prom.NewHistogramVec( serverHandledHistogramOpts, []string{"grpc_type", "grpc_service", "grpc_method"}, ) prom.Register(serverHandledHistogram) } serverHandledHistogramEnabled = true }
func newQueueMetrics(name string) queueMetrics { var ret *defaultQueueMetrics if len(name) == 0 { return ret } ret = &defaultQueueMetrics{ depth: prometheus.NewGauge(prometheus.GaugeOpts{ Subsystem: name, Name: "depth", Help: "Current depth of workqueue: " + name, }), adds: prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: name, Name: "adds", Help: "Total number of adds handled by workqueue: " + name, }), latency: prometheus.NewSummary(prometheus.SummaryOpts{ Subsystem: name, Name: "queue_latency", Help: "How long an item stays in workqueue" + name + " before being requested.", }), workDuration: prometheus.NewSummary(prometheus.SummaryOpts{ Subsystem: name, Name: "work_duration", Help: "How long processing an item from workqueue" + name + " takes.", }), addTimes: map[t]time.Time{}, processingStartTimes: map[t]time.Time{}, } prometheus.Register(ret.depth) prometheus.Register(ret.adds) prometheus.Register(ret.latency) prometheus.Register(ret.workDuration) return ret }
func (c *GaugeContainer) Get(metricName string, labels prometheus.Labels) prometheus.Gauge { hash := hashNameAndLabels(metricName, labels) gauge, ok := c.Elements[hash] if !ok { gauge = prometheus.NewGauge(prometheus.GaugeOpts{ Name: metricName, Help: defaultHelp, ConstLabels: labels, }) c.Elements[hash] = gauge if err := prometheus.Register(gauge); err != nil { log.Fatalf(regErrF, metricName, err) } } return gauge }
func (c *CounterContainer) Get(metricName string, labels prometheus.Labels) prometheus.Counter { hash := hashNameAndLabels(metricName, labels) counter, ok := c.Elements[hash] if !ok { counter = prometheus.NewCounter(prometheus.CounterOpts{ Name: metricName, Help: defaultHelp, ConstLabels: labels, }) c.Elements[hash] = counter if err := prometheus.Register(counter); err != nil { log.Fatalf(regErrF, metricName, err) } } return counter }
func registerConsulCollector(consulInfo string) { c := newConsulCollector(consulInfo) for { if err := prometheus.Register(c); err != nil { logger.Printf( "prometheus - could not register collector (-consul.info=%s)", consulInfo, ) <-time.After(1 * time.Second) continue } break } }
func ExampleAlreadyRegisteredError() { reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ Name: "requests_total", Help: "The total number of requests served.", }) if err := prometheus.Register(reqCounter); err != nil { if are, ok := err.(prometheus.AlreadyRegisteredError); ok { // A counter for that metric has been registered before. // Use the old counter from now on. reqCounter = are.ExistingCollector.(prometheus.Counter) } else { // Something else went wrong! panic(err) } } }
func (c *SummaryContainer) Get(metricName string, labels prometheus.Labels) prometheus.Summary { hash := hashNameAndLabels(metricName, labels) summary, ok := c.Elements[hash] if !ok { summary = prometheus.NewSummary( prometheus.SummaryOpts{ Name: metricName, Help: defaultHelp, ConstLabels: labels, }) c.Elements[hash] = summary if err := prometheus.Register(summary); err != nil { log.Fatalf(regErrF, metricName, err) } } return summary }
func ExampleGaugeFunc() { if err := prometheus.Register(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Subsystem: "runtime", Name: "goroutines_count", Help: "Number of goroutines that currently exist.", }, func() float64 { return float64(runtime.NumGoroutine()) }, )); err == nil { fmt.Println("GaugeFunc 'goroutines_count' registered.") } // Note that the count of goroutines is a gauge (and not a counter) as // it can go up and down. // Output: // GaugeFunc 'goroutines_count' registered. }
func newRetryMetrics(name string) retryMetrics { var ret *defaultRetryMetrics if len(name) == 0 { return ret } ret = &defaultRetryMetrics{ retries: prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: name, Name: "retries", Help: "Total number of retries handled by workqueue: " + name, }), } prometheus.Register(ret.retries) return ret }
// Start registers the client itself as a prometheus collector and begins
// serving the /metrics endpoint on p.Listen (default "localhost:9126") in a
// background goroutine. It always returns nil.
func (p *PrometheusClient) Start() error {
	p.metrics = make(map[string]prometheus.Metric)
	p.lastMetrics = make(map[string]prometheus.Metric)
	// NOTE(review): the Register error is discarded — on a config reload the
	// collector is already registered, so this fails silently by design(?).
	prometheus.Register(p)
	defer func() {
		if r := recover(); r != nil {
			// recovering from panic here because there is no way to stop a
			// running http go server except by a kill signal. Since the server
			// does not stop on SIGHUP, Start() will panic when the process
			// is reloaded.
		}
	}()
	if p.Listen == "" {
		p.Listen = "localhost:9126"
	}
	// http.Handle panics if "/metrics" is already registered (the reload
	// case); the deferred recover above swallows that panic.
	http.Handle("/metrics", prometheus.Handler())
	server := &http.Server{
		Addr: p.Listen,
	}
	// Fire-and-forget: the serve goroutine outlives Start and its error
	// (if any) is intentionally dropped.
	go server.ListenAndServe()
	return nil
}