Example #1
func newMetrics(r prometheus.Registerer) *metrics {
	m := &metrics{}

	m.gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "alertmanager_silences_gc_duration_seconds",
		Help: "Duration of the last silence garbage collection cycle.",
	})
	m.snapshotDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "alertmanager_silences_snapshot_duration_seconds",
		Help: "Duration of the last silence snapshot.",
	})
	m.queriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_silences_queries_total",
		Help: "How many silence queries were received.",
	})
	m.queryErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_silences_query_errors_total",
		Help: "How many silence received queries did not succeed.",
	})
	m.queryDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "alertmanager_silences_query_duration_seconds",
		Help: "Duration of silence query evaluation.",
	})

	if r != nil {
		r.MustRegister(
			m.gcDuration,
			m.snapshotDuration,
			m.queriesTotal,
			m.queryErrorsTotal,
			m.queryDuration,
		)
	}
	return m
}
Example #2
func newMetrics(r prometheus.Registerer) *metrics {
	m := &metrics{}

	m.gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "alertmanager_nflog_gc_duration_seconds",
		Help: "Duration of the last notification log garbage collection cycle.",
	})
	m.snapshotDuration = prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "alertmanager_nflog_snapshot_duration_seconds",
		Help: "Duration of the last notification log snapshot.",
	})
	m.queriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_nflog_queries_total",
		Help: "Number of notification log queries were received.",
	})
	m.queryErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "alertmanager_nflog_query_errors_total",
		Help: "Number notification log received queries that failed.",
	})
	m.queryDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "alertmanager_nflog_query_duration_seconds",
		Help: "Duration of notification log query evaluation.",
	})

	if r != nil {
		r.MustRegister(
			m.gcDuration,
			m.snapshotDuration,
			m.queriesTotal,
			m.queryErrorsTotal,
			m.queryDuration,
		)
	}
	return m
}
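
Both constructors assume a metrics struct along the following lines; this definition is reconstructed from the field accesses above, not copied from the Alertmanager source:

type metrics struct {
	gcDuration       prometheus.Summary
	snapshotDuration prometheus.Summary
	queriesTotal     prometheus.Counter
	queryErrorsTotal prometheus.Counter
	queryDuration    prometheus.Histogram
}

Accepting the Registerer as a parameter and tolerating nil keeps the constructor test-friendly: tests can call newMetrics(nil) repeatedly without MustRegister panicking on a duplicate registration.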
Example #3
func TestSensorRecordHistogram(t *testing.T) {
	testServer := httptest.NewServer(prometheus.UninstrumentedHandler())
	defer testServer.Close()

	sensor := &Sensor{
		Type: "histogram",
		collector: prometheus.NewHistogram(prometheus.HistogramOpts{
			Namespace: "telemetry",
			Subsystem: "sensors",
			Name:      "TestSensorRecordHistogram",
			Help:      "help",
		})}
	prometheus.MustRegister(sensor.collector)
	patt := `telemetry_sensors_TestSensorRecordHistogram_bucket{le="([\.0-9|\+Inf]*)"} ([1-9])`

	sensor.record("1.2")
	resp := getFromTestServer(t, testServer)
	expected := [][]string{{"2.5", "1"}, {"5", "1"}, {"10", "1"}, {"+Inf", "1"}}
	if !checkBuckets(resp, patt, expected) {
		t.Fatalf("Failed to get match for sensor in response")
	}
	sensor.record("1.2") // same value should add
	resp = getFromTestServer(t, testServer)
	expected = [][]string{{"2.5", "2"}, {"5", "2"}, {"10", "2"}, {"+Inf", "2"}}
	if !checkBuckets(resp, patt, expected) {
		t.Fatalf("Failed to get match for sensor in response")
	}
	sensor.record("4.5") // overlapping should overlap
	resp = getFromTestServer(t, testServer)
	expected = [][]string{{"2.5", "2"}, {"5", "3"}, {"10", "3"}, {"+Inf", "3"}}
	if !checkBuckets(resp, patt, expected) {
		t.Fatalf("Failed to get match for sensor in response")
	}
}
Example #4
func (n *Namespace) NewTimer(name, help string) Timer {
	t := &timer{
		m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
	}
	n.addMetric(t)
	return t
}
Example #5
// NewSensors creates new sensors from a raw config
func NewSensors(raw []interface{}) ([]*Sensor, error) {
	var sensors []*Sensor
	if err := utils.DecodeRaw(raw, &sensors); err != nil {
		return nil, fmt.Errorf("Sensor configuration error: %v", err)
	}
	for _, s := range sensors {
		check, err := commands.NewCommand(s.CheckExec, s.Timeout)
		if err != nil {
			return nil, fmt.Errorf("could not parse check in sensor %s: %s", s.Name, err)
		}
		check.Name = fmt.Sprintf("%s.sensor", s.Name)
		s.checkCmd = check

		// the prometheus client lib's API here is baffling... they don't expose
		// an interface or embed their Opts type in each of the Opts "subtypes",
		// so we can't share the initialization.
		switch s.Type {
		case "counter":
			s.collector = prometheus.NewCounter(prometheus.CounterOpts{
				Namespace: s.Namespace,
				Subsystem: s.Subsystem,
				Name:      s.Name,
				Help:      s.Help,
			})
		case "gauge":
			s.collector = prometheus.NewGauge(prometheus.GaugeOpts{
				Namespace: s.Namespace,
				Subsystem: s.Subsystem,
				Name:      s.Name,
				Help:      s.Help,
			})
		case "histogram":
			s.collector = prometheus.NewHistogram(prometheus.HistogramOpts{
				Namespace: s.Namespace,
				Subsystem: s.Subsystem,
				Name:      s.Name,
				Help:      s.Help,
			})
		case "summary":
			s.collector = prometheus.NewSummary(prometheus.SummaryOpts{
				Namespace: s.Namespace,
				Subsystem: s.Subsystem,
				Name:      s.Name,
				Help:      s.Help,
			})
		default:
			return nil, fmt.Errorf("invalid sensor type: %s", s.Type)
		}
		// we're going to unregister before every attempt to register
		// so that we can reload config
		prometheus.Unregister(s.collector)
		if err := prometheus.Register(s.collector); err != nil {
			return nil, err
		}
	}
	return sensors, nil
}
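
A side note on the comment above about not being able to share the initialization: in client_golang, CounterOpts and GaugeOpts are plain aliases of Opts, so those two cases can share a single Opts value via type conversion, while HistogramOpts and SummaryOpts carry extra fields (Buckets, Objectives) and still need field-by-field copying. A sketch of that factoring (buildCollector is a name invented here, reusing the Sensor fields from above):

func buildCollector(s *Sensor) (prometheus.Collector, error) {
	opts := prometheus.Opts{
		Namespace: s.Namespace,
		Subsystem: s.Subsystem,
		Name:      s.Name,
		Help:      s.Help,
	}
	switch s.Type {
	case "counter":
		// CounterOpts is an alias of Opts, so a direct conversion works.
		return prometheus.NewCounter(prometheus.CounterOpts(opts)), nil
	case "gauge":
		// GaugeOpts is likewise an alias of Opts.
		return prometheus.NewGauge(prometheus.GaugeOpts(opts)), nil
	case "histogram":
		// HistogramOpts is a distinct struct; copy the shared fields.
		return prometheus.NewHistogram(prometheus.HistogramOpts{
			Namespace: opts.Namespace,
			Subsystem: opts.Subsystem,
			Name:      opts.Name,
			Help:      opts.Help,
		}), nil
	case "summary":
		return prometheus.NewSummary(prometheus.SummaryOpts{
			Namespace: opts.Namespace,
			Subsystem: opts.Subsystem,
			Name:      opts.Name,
			Help:      opts.Help,
		}), nil
	default:
		return nil, fmt.Errorf("invalid sensor type: %s", s.Type)
	}
}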
Example #6
func ExampleHistogram() {
	temps := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "pond_temperature_celsius",
		Help:    "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
		Buckets: prometheus.LinearBuckets(20, 5, 5),  // 5 buckets, each 5 centigrade wide.
	})

	// Simulate some observations.
	for i := 0; i < 1000; i++ {
		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
	}

	// Just for demonstration, let's check the state of the histogram by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	temps.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// histogram: <
	//   sample_count: 1000
	//   sample_sum: 29969.50000000001
	//   bucket: <
	//     cumulative_count: 192
	//     upper_bound: 20
	//   >
	//   bucket: <
	//     cumulative_count: 366
	//     upper_bound: 25
	//   >
	//   bucket: <
	//     cumulative_count: 501
	//     upper_bound: 30
	//   >
	//   bucket: <
	//     cumulative_count: 638
	//     upper_bound: 35
	//   >
	//   bucket: <
	//     cumulative_count: 816
	//     upper_bound: 40
	//   >
	// >
}
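
The five buckets in the output come directly from the LinearBuckets call: LinearBuckets(start, width, count) returns count upper bounds spaced width apart. A throwaway check (not part of the original example):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// LinearBuckets(20, 5, 5) yields the upper bounds seen above.
	fmt.Println(prometheus.LinearBuckets(20, 5, 5)) // [20 25 30 35 40]
}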
Example #7
func newHistogram(opts prometheus.Opts, cutoffs []int64) (prometheus.Histogram, func(int64)) {
	buckets := make([]float64, len(cutoffs))
	for i := range cutoffs {
		buckets[i] = float64(cutoffs[i])
	}
	hOpts := prometheus.HistogramOpts{
		Namespace:   opts.Namespace,
		Subsystem:   opts.Subsystem,
		Name:        opts.Name,
		Help:        opts.Help,
		ConstLabels: opts.ConstLabels,
		Buckets:     buckets,
	}
	m := prometheus.NewHistogram(hOpts)
	return m, func(n int64) {
		m.Observe(float64(n))
	}
}
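
A hypothetical call site (all names and values here are invented for illustration) registers the returned histogram and records integer measurements through the closure, which spares callers the float64 conversion:

hist, observe := newHistogram(prometheus.Opts{
	Namespace: "myapp",
	Name:      "payload_bytes",
	Help:      "Size of handled payloads in bytes.",
}, []int64{256, 1024, 4096, 16384})
prometheus.MustRegister(hist)
observe(2048) // counted in the le="4096" bucket and above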
Example #8
func main() {

	router := routers.NewRouter()
	var port string
	fmt.Printf("len(os.Args) = %d ", len(os.Args))

	if len(os.Args) <= 1 {
		fmt.Println("Please speficy the port or use default port 8090")
		port = "8090"
	} else {
		port = os.Args[1]
	}
	fmt.Printf("port = %s", port)

	// histogram
	temps := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "pond_temperature_celsius",
		Help:    "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells.
		Buckets: prometheus.LinearBuckets(20, 5, 5),  // 5 buckets, each 5 centigrade wide.
	})

	// Simulate some observations.
	for i := 0; i < 1000; i++ {
		temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)
	}

	// Just for demonstration, let's check the state of the histogram by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	temps.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	router.Handle("/metrics", prometheus.Handler())

	log.Fatal(http.ListenAndServe(":"+port, router))

}
Example #9
// NewHistogram creates a new configured prometheus histogram object.
func NewHistogram(config HistogramConfig) (spec.Histogram, error) {
	newHistogram := &histogram{
		HistogramConfig: config,
	}

	if len(newHistogram.Buckets) == 0 {
		return nil, maskAnyf(invalidConfigError, "buckets must not be empty")
	}
	if newHistogram.Help == "" {
		return nil, maskAnyf(invalidConfigError, "help must not be empty")
	}
	if newHistogram.Name == "" {
		return nil, maskAnyf(invalidConfigError, "name must not be empty")
	}

	newHistogram.ClientHistogram = prometheusclient.NewHistogram(prometheusclient.HistogramOpts{
		Buckets: newHistogram.Buckets,
		Help:    newHistogram.Help,
		Name:    newHistogram.Name,
	})

	return newHistogram, nil
}
Example #10
func init() {
	flag.StringVar(&iface, "i", "eth0", "Interface to read packets from")
	flag.StringVar(&fname, "r", "", "Filename to read from, overrides -i")
	flag.IntVar(&snaplen, "s", 65536, "Number of max bytes to read per packet")
	flag.StringVar(&tstype, "t", "", "Type of timestamp to use")
	flag.IntVar(&port, "P", 9119, "The port number to listen on ")
	flag.BoolVar(&verbose, "v", false, "Enable verbose mode")
}

// the maximum response time value seen so far
var max = 0.0

// prometheus histogram (https://godoc.org/github.com/prometheus/client_golang/prometheus#Histogram)
var rtHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "mongodb_histogram_response_time",
	Help:    "Response time for MongoDB operations",
	Buckets: prometheus.ExponentialBuckets(0.00000001, 2, 10),
})

// prometheus summary (https://godoc.org/github.com/prometheus/client_golang/prometheus#Summary)
var rtSummary = prometheus.NewSummary(prometheus.SummaryOpts{
	Name: "mongodb_summary_response_time",
	Help: "Response time for MongoDB operations",
})

// prometheus gauge (https://godoc.org/github.com/prometheus/client_golang/prometheus#Gauge)
var rtMax = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "ognom",
	Name:      "mongodb_max_response_time",
	Help:      "Max response time seen for MongoDB operations in the last 10 seconds",
})
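
The update loop that feeds these three collectors is not part of this snippet; a plausible sketch (assuming an observations channel of response times, and a "time" import) tracks the running maximum and flushes it to the gauge on a 10-second ticker, matching the help text of rtMax:

func trackMax(observations <-chan float64) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case v := <-observations:
			rtHistogram.Observe(v)
			rtSummary.Observe(v)
			if v > max {
				max = v
			}
		case <-ticker.C:
			rtMax.Set(max) // publish the window's maximum...
			max = 0.0      // ...and reset for the next 10 seconds
		}
	}
}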
Example #11
	// differentiated via a "service" label.
	rpcDurations = prometheus.NewSummaryVec(
		prometheus.SummaryOpts{
			Name:       "rpc_durations_seconds",
			Help:       "RPC latency distributions.",
			Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		[]string{"service"},
	)
	// The same as above, but now as a histogram, and only for the normal
	// distribution. The buckets are targeted to the parameters of the
	// normal distribution, with 20 buckets centered on the mean, each
	// half-sigma wide.
	rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "rpc_durations_histogram_seconds",
		Help:    "RPC latency distributions.",
		Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
	})
)

func init() {
	// Register the summary and the histogram with Prometheus's default registry.
	prometheus.MustRegister(rpcDurations)
	prometheus.MustRegister(rpcDurationsHistogram)
}

func main() {
	flag.Parse()

	start := time.Now()
Example #12
func newMasterCollector(httpClient *httpClient) prometheus.Collector {
	metrics := map[prometheus.Collector]func(metricMap, prometheus.Collector) error{
		// CPU/Disk/Mem resources in free/used
		gauge("master", "cpus", "Current CPU resources in cluster.", "type"): func(m metricMap, c prometheus.Collector) error {
			total, ok := m["master/cpus_total"]
			used, ok := m["master/cpus_used"]
			if !ok {
				return notFoundInMap
			}
			c.(*prometheus.GaugeVec).WithLabelValues("free").Set(total - used)
			c.(*prometheus.GaugeVec).WithLabelValues("used").Set(used)
			return nil
		},
		gauge("master", "cpus_revocable", "Current revocable CPU resources in cluster.", "type"): func(m metricMap, c prometheus.Collector) error {
			total, ok := m["master/cpus_revocable_total"]
			used, ok := m["master/cpus_revocable_used"]
			if !ok {
				return notFoundInMap
			}
			c.(*prometheus.GaugeVec).WithLabelValues("free").Set(total - used)
			c.(*prometheus.GaugeVec).WithLabelValues("used").Set(used)
			return nil
		},
		gauge("master", "mem", "Current memory resources in cluster.", "type"): func(m metricMap, c prometheus.Collector) error {
			total, ok := m["master/mem_total"]
			used, ok := m["master/mem_used"]
			if !ok {
				return notFoundInMap
			}
			c.(*prometheus.GaugeVec).WithLabelValues("free").Set(total - used)
			c.(*prometheus.GaugeVec).WithLabelValues("used").Set(used)
			return nil
		},
		gauge("master", "mem_revocable", "Current revocable memory resources in cluster.", "type"): func(m metricMap, c prometheus.Collector) error {
			total, ok := m["master/mem_revocable_total"]
			used, ok := m["master/mem_revocable_used"]
			if !ok {
				return notFoundInMap
			}
			c.(*prometheus.GaugeVec).WithLabelValues("free").Set(total - used)
			c.(*prometheus.GaugeVec).WithLabelValues("used").Set(used)
			return nil
		},
		gauge("master", "disk", "Current disk resources in cluster.", "type"): func(m metricMap, c prometheus.Collector) error {
			total, ok := m["master/disk_total"]
			used, ok := m["master/disk_used"]
			if !ok {
				return notFoundInMap
			}
			c.(*prometheus.GaugeVec).WithLabelValues("free").Set(total - used)
			c.(*prometheus.GaugeVec).WithLabelValues("used").Set(used)
			return nil
		},
		gauge("master", "disk_revocable", "Current disk resources in cluster.", "type"): func(m metricMap, c prometheus.Collector) error {
			total, ok := m["master/disk_revocable_total"]
			used, ok := m["master/disk_revocable_used"]
			if !ok {
				return notFoundInMap
			}
			c.(*prometheus.GaugeVec).WithLabelValues("free").Set(total - used)
			c.(*prometheus.GaugeVec).WithLabelValues("used").Set(used)
			return nil
		},

		// Master stats about uptime and election state
		prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: "mesos",
			Subsystem: "master",
			Name:      "elected",
			Help:      "1 if master is elected leader, 0 if not",
		}): func(m metricMap, c prometheus.Collector) error {
			elected, ok := m["master/elected"]
			if !ok {
				return notFoundInMap
			}
			c.(prometheus.Gauge).Set(elected)
			return nil
		},
		prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: "mesos",
			Subsystem: "master",
			Name:      "uptime_seconds",
			Help:      "Number of seconds the master process is running.",
		}): func(m metricMap, c prometheus.Collector) error {
			uptime, ok := m["master/uptime_secs"]
			if !ok {
				return notFoundInMap
			}
			c.(prometheus.Gauge).Set(uptime)
			return nil
		},
		// Master stats about agents
		counter("master", "slave_registration_events_total", "Total number of registration events on this master since it booted.", "event"): func(m metricMap, c prometheus.Collector) error {
			registrations, ok := m["master/slave_registrations"]
			reregistrations, ok := m["master/slave_reregistrations"]
			if !ok {
				return notFoundInMap
			}
			c.(*settableCounterVec).Set(registrations, "register")
			c.(*settableCounterVec).Set(reregistrations, "reregister")
			return nil
		},

		counter("master", "slave_removal_events_total", "Total number of removal events on this master since it booted.", "event"): func(m metricMap, c prometheus.Collector) error {
			scheduled, ok := m["master/slave_shutdowns_scheduled"]
			canceled, ok := m["master/slave_shutdowns_canceled"]
			completed, ok := m["master/slave_shutdowns_completed"]
			removals, ok := m["master/slave_removals"]
			if !ok {
				return notFoundInMap
			}

			c.(*settableCounterVec).Set(scheduled, "scheduled")
			c.(*settableCounterVec).Set(canceled, "canceled")
			c.(*settableCounterVec).Set(completed, "completed")
			c.(*settableCounterVec).Set(removals-completed, "died")
			return nil
		},
		gauge("master", "slaves_state", "Current number of slaves known to the master per connection and registration state.", "connection_state", "registration_state"): func(m metricMap, c prometheus.Collector) error {
			active, ok := m["master/slaves_active"]
			inactive, ok := m["master/slaves_inactive"]
			disconnected, ok := m["master/slaves_disconnected"]

			if !ok {
				return notFoundInMap
			}
			// FIXME: Make sure those assumptions are right
			// Every "active" node is connected to the master
			c.(*prometheus.GaugeVec).WithLabelValues("connected", "active").Set(active)
			// Every "inactive" node is connected but node sending offers
			c.(*prometheus.GaugeVec).WithLabelValues("connected", "inactive").Set(inactive)
			// Every "disconnected" node is "inactive"
			c.(*prometheus.GaugeVec).WithLabelValues("disconnected", "inactive").Set(disconnected)
			// Every "connected" node is either active or inactive
			return nil
		},

		// Master stats about frameworks
		gauge("master", "frameworks_state", "Current number of frames known to the master per connection and registration state.", "connection_state", "registration_state"): func(m metricMap, c prometheus.Collector) error {
			active, ok := m["master/frameworks_active"]
			inactive, ok := m["master/frameworks_inactive"]
			disconnected, ok := m["master/frameworks_disconnected"]

			if !ok {
				return notFoundInMap
			}
			// FIXME: Make sure those assumptions are right
			// Every "active" framework is connected to the master
			c.(*prometheus.GaugeVec).WithLabelValues("connected", "active").Set(active)
			// Every "inactive" framework is connected but framework sending offers
			c.(*prometheus.GaugeVec).WithLabelValues("connected", "inactive").Set(inactive)
			// Every "disconnected" framework is "inactive"
			c.(*prometheus.GaugeVec).WithLabelValues("disconnected", "inactive").Set(disconnected)
			// Every "connected" framework is either active or inactive
			return nil
		},
		prometheus.NewGauge(prometheus.GaugeOpts{
			Namespace: "mesos",
			Subsystem: "master",
			Name:      "offers_pending",
			Help:      "Current number of offers made by the master which aren't yet accepted or declined by frameworks.",
		}): func(m metricMap, c prometheus.Collector) error {
			offers, ok := m["master/outstanding_offers"]
			if !ok {
				return notFoundInMap
			}
			c.(prometheus.Gauge).Set(offers)
			return nil
		},
		// Master stats about tasks
		counter("master", "task_states_exit_total", "Total number of tasks processed by exit state.", "state"): func(m metricMap, c prometheus.Collector) error {
			errored, ok := m["master/tasks_error"]
			failed, ok := m["master/tasks_failed"]
			finished, ok := m["master/tasks_finished"]
			killed, ok := m["master/tasks_killed"]
			lost, ok := m["master/tasks_lost"]
			if !ok {
				return notFoundInMap
			}
			c.(*settableCounterVec).Set(errored, "errored")
			c.(*settableCounterVec).Set(failed, "failed")
			c.(*settableCounterVec).Set(finished, "finished")
			c.(*settableCounterVec).Set(killed, "killed")
			c.(*settableCounterVec).Set(lost, "lost")
			return nil
		},
		counter("master", "task_states_current", "Current number of tasks by state.", "state"): func(m metricMap, c prometheus.Collector) error {
			running, ok := m["master/tasks_running"]
			staging, ok := m["master/tasks_staging"]
			starting, ok := m["master/tasks_starting"]
			if !ok {
				return notFoundInMap
			}
			c.(*settableCounterVec).Set(running, "running")
			c.(*settableCounterVec).Set(staging, "staging")
			c.(*settableCounterVec).Set(starting, "starting")
			return nil
		},

		// Master stats about messages
		counter("master", "messages_outcomes_total",
			"Total number of messages by outcome of operation and direction.",
			"source", "destination", "type", "outcome"): func(m metricMap, c prometheus.Collector) error {
			frameworkToExecutorValid, ok := m["master/valid_framework_to_executor_messages"]
			frameworkToExecutorInvalid, ok := m["master/invalid_framework_to_executor_messages"]
			executorToFrameworkValid, ok := m["master/valid_executor_to_framework_messages"]
			executorToFrameworkInvalid, ok := m["master/invalid_executor_to_framework_messages"]

			// status updates are sent from framework?(FIXME) to slave
			// status update acks are sent from slave to framework?
			statusUpdateAckValid, ok := m["master/valid_status_update_acknowledgements"]
			statusUpdateAckInvalid, ok := m["master/invalid_status_update_acknowledgements"]
			statusUpdateValid, ok := m["master/valid_status_updates"]
			statusUpdateInvalid, ok := m["master/invalid_status_updates"]

			if !ok {
				return notFoundInMap
			}
			c.(*settableCounterVec).Set(frameworkToExecutorValid, "framework", "executor", "", "valid")
			c.(*settableCounterVec).Set(frameworkToExecutorInvalid, "framework", "executor", "", "invalid")

			c.(*settableCounterVec).Set(executorToFrameworkValid, "executor", "framework", "", "valid")
			c.(*settableCounterVec).Set(executorToFrameworkInvalid, "executor", "framework", "", "invalid")

			// We consider a ack message simply as a message from slave to framework
			c.(*settableCounterVec).Set(statusUpdateValid, "framework", "slave", "status_update", "valid")
			c.(*settableCounterVec).Set(statusUpdateInvalid, "framework", "slave", "status_update", "invalid")
			c.(*settableCounterVec).Set(statusUpdateAckValid, "slave", "framework", "status_update", "valid")
			c.(*settableCounterVec).Set(statusUpdateAckInvalid, "slave", "framework", "status_update", "invalid")
			return nil
		},
		counter("master", "messages_type_total", "Total number of valid messages by type.", "type"): func(m metricMap, c prometheus.Collector) error {
			for k, v := range m {
				if !strings.HasPrefix(k, "master/messages_") {
					continue
				}
				// FIXME: We expose things like messages_framework_to_executor twice
				c.(*settableCounterVec).Set(v, strings.TrimPrefix(k, "master/"))
			}
			return nil
		},

		// Master stats about events
		gauge("master", "event_queue_length", "Current number of elements in event queue by type", "type"): func(m metricMap, c prometheus.Collector) error {
			dispatches, ok := m["master/event_queue_dispatches"]
			httpRequests, ok := m["master/event_queue_http_requests"]
			messages, ok := m["master/event_queue_messages"]
			if !ok {
				return notFoundInMap
			}

			c.(*prometheus.GaugeVec).WithLabelValues("message").Set(messages)
			c.(*prometheus.GaugeVec).WithLabelValues("http_request").Set(httpRequests)
			c.(*prometheus.GaugeVec).WithLabelValues("dispatches").Set(dispatches)
			return nil
		},

		// Master stats about registrar
		prometheus.NewHistogram(prometheus.HistogramOpts{
			Namespace: "mesos",
			Subsystem: "master",
			Name:      "state_store_seconds",
			Help:      "Registry write latency in seconds",
		}): func(m metricMap, c prometheus.Collector) error {
			//	c.(*prometheus.Histogram).Buckets //FIXME
			return nil
		},
	}
	return newMetricCollector(httpClient, metrics)
}
Example #13
	refreshHighWatermarks           = "refresh_high_watermarks"
	renderView                      = "render_view"

	cutOff        = "recency_threshold"
	processorName = "processor"
)

var (
	diskLatencyHistogram = &prometheus.HistogramSpecification{
		Starts:                prometheus.LogarithmicSizedBucketsFor(0, 5000),
		BucketBuilder:         prometheus.AccumulatingBucketBuilder(prometheus.EvictAndReplaceWith(10, prometheus.AverageReducer), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	}

	curationDuration          = prometheus.NewCounter()
	curationDurations         = prometheus.NewHistogram(diskLatencyHistogram)
	curationFilterOperations  = prometheus.NewCounter()
	storageOperations         = prometheus.NewCounter()
	storageOperationDurations = prometheus.NewCounter()
	storageLatency            = prometheus.NewHistogram(diskLatencyHistogram)
	queueSizes                = prometheus.NewGauge()
	storedSamplesCount        = prometheus.NewCounter()
)

func recordOutcome(duration time.Duration, err error, success, failure map[string]string) {
	labels := success
	if err != nil {
		labels = failure
	}

	storageOperations.Increment(labels)
Example #14
	alive       = "alive"
	failure     = "failure"
	outcome     = "outcome"
	state       = "state"
	success     = "success"
	unreachable = "unreachable"
)

var (
	networkLatencyHistogram = &prometheus.HistogramSpecification{
		Starts:                prometheus.LogarithmicSizedBucketsFor(0, 1000),
		BucketBuilder:         prometheus.AccumulatingBucketBuilder(prometheus.EvictAndReplaceWith(10, prometheus.AverageReducer), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99},
	}

	targetOperationLatencies = prometheus.NewHistogram(networkLatencyHistogram)

	retrievalDurations = prometheus.NewHistogram(&prometheus.HistogramSpecification{
		Starts:                prometheus.LogarithmicSizedBucketsFor(0, 10000),
		BucketBuilder:         prometheus.AccumulatingBucketBuilder(prometheus.EvictAndReplaceWith(10, prometheus.AverageReducer), 100),
		ReportablePercentiles: []float64{0.01, 0.05, 0.5, 0.90, 0.99}})

	targetOperations  = prometheus.NewCounter()
	dnsSDLookupsCount = prometheus.NewCounter()
)

func recordOutcome(err error) {
	message := success
	if err != nil {
		message = failure
	}
Example #15
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package wal

import "github.com/prometheus/client_golang/prometheus"

var (
	syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "etcd",
		Subsystem: "disk",
		Name:      "wal_fsync_duration_seconds",
		Help:      "The latency distributions of fsync called by wal.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
	})
)

func init() {
	prometheus.MustRegister(syncDurations)
}
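
The histogram is presumably fed by timing each fsync; a minimal sketch of such a call site (fsyncAndObserve is a name invented here, and "os"/"time" imports are assumed):

// fsyncAndObserve times an fsync and records the latency in syncDurations.
func fsyncAndObserve(f *os.File) error {
	start := time.Now()
	err := f.Sync()
	// Observe in seconds to match the metric name and bucket layout.
	syncDurations.Observe(time.Since(start).Seconds())
	return err
}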
Example #16
			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 18),
		}, []string{"type"})

	sendReqHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "request_seconds",
			Help:      "Bucketed histogram of sending request duration.",
			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 18),
		}, []string{"type"})

	copBuildTaskHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "cop_buildtask_seconds",
			Help:      "Coprocessor buildTask cost time.",
		})

	copTaskLenHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "cop_task_len",
			Help:      "Coprocessor task length.",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 11),
		})

	coprocessorCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
Example #17
package etcdserver

import (
	"time"

	"github.com/coreos/etcd/pkg/runtime"
	"github.com/prometheus/client_golang/prometheus"
)

var (
	// TODO: with label in v3?
	proposeDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name:      "proposal_durations_seconds",
		Help:      "The latency distributions of committing proposal.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
	})
	proposePending = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name:      "pending_proposal_total",
		Help:      "The total number of pending proposals.",
	})
	// This is number of proposal failed in client's view.
	// The proposal might be later got committed in raft.
	proposeFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name:      "proposal_failed_total",
Example #18
var (
	queryResultsPath = flag.String("query_results_path",
		"/tmp/qr/",
		"Path where query results files (page_0.json etc.) are stored")

	perPackagePathRe = regexp.MustCompile(`^/perpackage-results/([^/]+)/` +
		strconv.Itoa(resultsPerPackage) + `/page_([0-9]+).json$`)

	queryDurations = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name: "query_durations_ms",
			Help: "Duration of a query in milliseconds.",
			Buckets: []float64{
				1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
				15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100,
				150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000,
				2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
				20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000, 100000,
				500000, 1000000,
			},
		})
)

const (
	// NB: All of these constants need to match those in static/instant.js.
	packagesPerPage   = 5
	resultsPerPackage = 2
	resultsPerPage    = 10
)
Example #19
			Name:      "request_total",
			Help:      "Counter of tikv-server requests.",
		}, []string{"type"})

	sendReqHistogram = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "request_seconds",
			Help:      "Bucketed histogram of sending request duration.",
		}, []string{"type"})

	copBuildTaskHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "cop_buildtask_seconds",
			Help:      "Coprocessor buildTask cost time.",
		})

	copTaskLenHistogram = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "tikvclient",
			Name:      "cop_task_len",
			Help:      "Coprocessor task length.",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 11),
		})

	coprocessorCounter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
Example #20
// See the License for the specific language governing permissions and
// limitations under the License.

package metrics

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	queryMetric = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "query",
			Name:      "handle_query_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) of handled queries.",
			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
		})
)

// Query is used to add query cost time into the metrics.
func Query(costTime time.Duration) {
	queryMetric.Observe(costTime.Seconds())
}

func init() {
	prometheus.MustRegister(queryMetric)
}
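
A typical call site (hypothetical, not from the TiDB source) captures a start time and hands the elapsed duration to Query, often via defer:

func handleQuery() {
	start := time.Now()
	defer func() { metrics.Query(time.Since(start)) }()
	// ... execute the query ...
}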
Example #21
		Namespace: Namespace,
		Subsystem: "engine",
		Name:      "task_failure_count_total",
		Help:      "Counter of engine schedule task failures.",
	}, []string{"type"})

	engineReconcileCount = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: Namespace,
		Subsystem: "engine",
		Name:      "reconcile_count_total",
		Help:      "Counter of reconcile rounds.",
	})

	engineReconcileDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: Namespace,
		Subsystem: "engine",
		Name:      "reconcile_duration_second",
		Help:      "Histogram of time (in seconds) each schedule round takes.",
	})

	engineReconcileFailureCount = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: Namespace,
		Subsystem: "engine",
		Name:      "reconcile_failure_count_total",
		Help:      "Counter of scheduling failures.",
	}, []string{"type"})

	registryOpCount = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: Namespace,
		Subsystem: "registry",
		Name:      "operation_count_total",
		Help:      "Counter of registry operations.",
Example #22
)

const (
	checkInterval       = 5 * time.Minute
	refreshLockDuration = time.Minute * 2
	lockDuration        = time.Minute*8 + refreshLockDuration
	maxBackOff          = 15 * time.Minute
)

var (
	log = capnslog.NewPackageLogger("github.com/coreos/clair", "notifier")

	notifiers = make(map[string]Notifier)

	promNotifierLatencyMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "clair_notifier_latency_milliseconds",
		Help: "Time it takes to send a notification after it's been created.",
	})

	promNotifierBackendErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "clair_notifier_backend_errors_total",
		Help: "Number of errors that notifier backends generated.",
	}, []string{"backend"})
)

// Notifier represents anything that can transmit notifications.
type Notifier interface {
	// Configure attempts to initialize the notifier with the provided configuration.
	// It returns whether the notifier is enabled or not.
	Configure(*config.NotifierConfig) (bool, error)
	// Send informs the existence of the specified notification.
	Send(notification database.VulnerabilityNotification) error
Example #23
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package tidb

import (
	"github.com/prometheus/client_golang/prometheus"
)

var (
	sessionExecuteParseDuration = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "server",
			Name:      "session_execute_parse_duration",
			Help:      "Bucketed histogram of processing time (s) in parse SQL.",
			Buckets:   prometheus.LinearBuckets(0.00004, 0.00001, 13),
		})
	sessionExecuteCompileDuration = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "server",
			Name:      "session_execute_compile_duration",
			Help:      "Bucketed histogram of processing time (s) in query optimize.",
			Buckets:   prometheus.LinearBuckets(0.00004, 0.00001, 13),
		})
	sessionExecuteRunDuration = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "server",
Example #24
	uniqueLurkingAgentCount = prometheus.NewGauge(prometheus.GaugeOpts{
		Name:      "unique_lurking_agents",
		Subsystem: "presence",
		Help:      "Number of unique, lurking agents in the presence table.",
	})

	uniqueWebAgentCount = prometheus.NewGauge(prometheus.GaugeOpts{
		Name:      "unique_web_agents",
		Subsystem: "presence",
		Help:      "Number of unique agents using the web client in the presence table.",
	})

	sessionsPerAgent = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:      "sessions_per_agent",
		Subsystem: "presence",
		Help:      "Number of simultaneous live sessions for each active agent.",
		Buckets:   prometheus.LinearBuckets(0, 1, 10),
	})
)

func init() {
	prometheus.MustRegister(rowCount)
	prometheus.MustRegister(activeRowCount)
	prometheus.MustRegister(activeRowCountPerRoom)
	prometheus.MustRegister(lurkingRowCount)
	prometheus.MustRegister(lurkingRowCountPerRoom)
	prometheus.MustRegister(uniqueAgentCount)
	prometheus.MustRegister(uniqueLurkingAgentCount)
	prometheus.MustRegister(uniqueWebAgentCount)
	prometheus.MustRegister(sessionsPerAgent)
}
Example #25
File: s.go Project: jaqx0r/blts
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	port = flag.String("port", "8000", "Port to listen on.")
)

var (
	requests = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests", Help: "total requests received"})
	errors = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "errors", Help: "total errors served"}, []string{"code"})
	latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "latency_ms",
		Help:    "request latency in milliseconds",
		Buckets: prometheus.ExponentialBuckets(1, 2, 20)})
	backend_latency_ms = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "backend_latency_ms",
		Help:    "request latency in milliseconds",
		Buckets: prometheus.ExponentialBuckets(1, 2, 20)})
)

func init() {
	prometheus.MustRegister(requests)
	prometheus.MustRegister(errors)
	prometheus.MustRegister(latency_ms)
	prometheus.MustRegister(backend_latency_ms)
}
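
Because the buckets are in milliseconds, observations should be converted from time.Duration accordingly; a sketch of an instrumented handler (the handler body is invented for illustration, assuming "net/http" and "time" imports):

func handler(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	requests.Inc()
	w.Write([]byte("ok"))
	// Divide by time.Millisecond to keep fractional milliseconds.
	latency_ms.Observe(float64(time.Since(start)) / float64(time.Millisecond))
}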

var (
Example #26
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package snap

import "github.com/prometheus/client_golang/prometheus"

var (
	// TODO: save_fsync latency?
	saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "etcd_debugging",
		Subsystem: "snap",
		Name:      "save_total_durations_seconds",
		Help:      "The total latency distributions of save called by snapshot.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
	})

	marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "etcd_debugging",
		Subsystem: "snap",
		Name:      "save_marshalling_durations_seconds",
		Help:      "The marshalling cost distributions of save called by snapshot.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
	})
)

func init() {
	prometheus.MustRegister(saveDurations)
Example #27
	}, []string{"volume"})
	promFileSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "torus_server_file_syncs",
		Help: "Number of times a file has been synced on this server",
	}, []string{"volume"})
	promFileChangedSyncs = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "torus_server_file_changed_syncs",
		Help: "Number of times a file has been synced on this server, and the file has changed underneath it",
	}, []string{"volume"})
	promFileWrittenBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "torus_server_file_written_bytes",
		Help: "Number of bytes written to a file on this server",
	}, []string{"volume"})
	promFileBlockRead = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "torus_server_file_block_read_us",
		Help:    "Histogram of ms taken to read a block through the layers and into the file abstraction",
		Buckets: prometheus.ExponentialBuckets(50.0, 2, 20),
	})
	promFileBlockWrite = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "torus_server_file_block_write_us",
		Help:    "Histogram of ms taken to write a block through the layers and into the file abstraction",
		Buckets: prometheus.ExponentialBuckets(50.0, 2, 20),
	})
)

func init() {
	prometheus.MustRegister(promOpenINodes)
	prometheus.MustRegister(promOpenFiles)
	prometheus.MustRegister(promFileSyncs)
	prometheus.MustRegister(promFileChangedSyncs)
	prometheus.MustRegister(promFileWrittenBytes)
Example #28
			Help:      "Total number of events sent by this member.",
		})

	pendingEventsGauge = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "etcd",
			Subsystem: "storage",
			Name:      "pending_events_total",
			Help:      "Total number of pending events to be sent.",
		})

	indexCompactionPauseDurations = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "etcd",
			Subsystem: "storage",
			Name:      "index_compaction_pause_duration_milliseconds",
			Help:      "Bucketed histogram of index compaction pause duration.",
			// 0.5ms -> 1second
			Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
		})

	dbCompactionPauseDurations = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "etcd",
			Subsystem: "storage",
			Name:      "db_compaction_pause_duration_milliseconds",
			Help:      "Bucketed histogram of db compaction pause duration.",
			// 1ms -> 4second
			Buckets: prometheus.ExponentialBuckets(1, 2, 13),
		})
Example #29
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package backend

import "github.com/prometheus/client_golang/prometheus"

var (
	commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "etcd",
		Subsystem: "disk",
		Name:      "backend_commit_duration_seconds",
		Help:      "The latency distributions of commit called by backend.",
		Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
	})
)

func init() {
	prometheus.MustRegister(commitDurations)
}
Example #30
import (
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

const schedulerSubsystem = "scheduler"

var BindingSaturationReportInterval = 1 * time.Second

var (
	E2eSchedulingLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: schedulerSubsystem,
			Name:      "e2e_scheduling_latency_microseconds",
			Help:      "E2e scheduling latency (scheduling algorithm + binding)",
			Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
		},
	)
	SchedulingAlgorithmLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: schedulerSubsystem,
			Name:      "scheduling_algorithm_latency_microseconds",
			Help:      "Scheduling algorithm latency",
			Buckets:   prometheus.ExponentialBuckets(1000, 2, 15),
		},
	)
	BindingLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Subsystem: schedulerSubsystem,
			Name:      "binding_latency_microseconds",