Example #1
// Collect implements prometheus.Collector.
func (c *viewCollector) Collect(ch chan<- prometheus.Metric) {
	for _, v := range c.stats.Views {
		for _, s := range v.Cache {
			ch <- prometheus.MustNewConstMetric(
				resolverCache, prometheus.GaugeValue, float64(s.Gauge), v.Name, s.Name,
			)
		}
		for _, s := range v.ResolverQueries {
			ch <- prometheus.MustNewConstMetric(
				resolverQueries, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
			)
		}
		for _, s := range v.ResolverStats {
			if desc, ok := resolverMetricStats[s.Name]; ok {
				ch <- prometheus.MustNewConstMetric(
					desc, prometheus.CounterValue, float64(s.Counter), v.Name,
				)
			}
			if desc, ok := resolverLabelStats[s.Name]; ok {
				ch <- prometheus.MustNewConstMetric(
					desc, prometheus.CounterValue, float64(s.Counter), v.Name, s.Name,
				)
			}
		}
		if buckets, count, err := histogram(v.ResolverStats); err == nil {
			ch <- prometheus.MustNewConstHistogram(
				resolverQueryDuration, count, math.NaN(), buckets, v.Name,
			)
		} else {
			log.Warn("Error parsing RTT:", err)
		}
	}
}
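Example #1 shows only the Collect half of the collector; the Describe method, descriptor definitions, and registration live elsewhere in the exporter. A minimal, self-contained sketch of that plumbing is below. The type, metric name, port, and data are illustrative stand-ins, not the exporter's actual code.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// statsSource stands in for the exporter's parsed statistics; the real
// viewCollector holds a full statistics struct instead.
type statsSource struct {
	cacheEntries map[string]float64 // view name -> cache size
}

type constCollector struct {
	stats     statsSource
	cacheDesc *prometheus.Desc
}

// Describe can be implemented via DescribeByCollect for collectors that only
// emit const metrics, as viewCollector does.
func (c *constCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect emits one gauge per view, mirroring the shape of Example #1.
func (c *constCollector) Collect(ch chan<- prometheus.Metric) {
	for view, size := range c.stats.cacheEntries {
		ch <- prometheus.MustNewConstMetric(c.cacheDesc, prometheus.GaugeValue, size, view)
	}
}

func main() {
	c := &constCollector{
		stats: statsSource{cacheEntries: map[string]float64{"_default": 42}},
		cacheDesc: prometheus.NewDesc(
			"resolver_cache_rrsets", "Illustrative cache gauge.", []string{"view"}, nil,
		),
	}
	prometheus.MustRegister(c)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}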
Example #2
func ExampleConstHistogram() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds",
		"A histogram of the HTTP request durations.",
		[]string{"code", "method"},
		prometheus.Labels{"owner": "example"},
	)

	// Create a constant histogram from values we got from a 3rd party telemetry system.
	h := prometheus.MustNewConstHistogram(
		desc,
		4711, 403.34,
		map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
		"200", "get",
	)

	// Just for demonstration, let's check the state of the histogram by
	// (ab)using its Write method (which is usually only used by Prometheus
	// internally).
	metric := &dto.Metric{}
	h.Write(metric)
	fmt.Println(proto.MarshalTextString(metric))

	// Output:
	// label: <
	//   name: "code"
	//   value: "200"
	// >
	// label: <
	//   name: "method"
	//   value: "get"
	// >
	// label: <
	//   name: "owner"
	//   value: "example"
	// >
	// histogram: <
	//   sample_count: 4711
	//   sample_sum: 403.34
	//   bucket: <
	//     cumulative_count: 121
	//     upper_bound: 25
	//   >
	//   bucket: <
	//     cumulative_count: 2403
	//     upper_bound: 50
	//   >
	//   bucket: <
	//     cumulative_count: 3221
	//     upper_bound: 100
	//   >
	//   bucket: <
	//     cumulative_count: 4233
	//     upper_bound: 200
	//   >
	// >
}
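MustNewConstHistogram panics if the descriptor is invalid or the label values do not match it. When those values come from data the collector does not control, the error-returning NewConstHistogram can be used instead. A small wrapper sketch follows; the function name is illustrative, and it assumes the same prometheus and log packages used by the surrounding examples.

// collectConstHistogram uses NewConstHistogram, the error-returning variant of
// MustNewConstHistogram, so a mismatched label count skips the sample instead
// of panicking mid-scrape.
func collectConstHistogram(ch chan<- prometheus.Metric, desc *prometheus.Desc, count uint64, sum float64, buckets map[float64]uint64, labelValues ...string) {
	m, err := prometheus.NewConstHistogram(desc, count, sum, buckets, labelValues...)
	if err != nil {
		log.Errorln("skipping histogram sample:", err)
		return
	}
	ch <- m
}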
Example #3
// histogramMetric converts a stats.Histogram into a Prometheus const
// histogram: cutoffs and the sum are scaled by scale, per-bucket counts are
// accumulated into cumulative buckets, and the final overflow bucket is
// folded into the total count rather than emitted explicitly.
func histogramMetric(desc *prometheus.Desc, h *stats.Histogram, scale float64, labels ...string) prometheus.Metric {
	count := uint64(0)
	sum := float64(h.Total()) * scale
	cutoffs := h.Cutoffs()
	statBuckets := h.Buckets()
	promBuckets := make(map[float64]uint64, len(cutoffs))
	for i, cutoff := range cutoffs {
		upperBound := float64(cutoff) * scale
		count += uint64(statBuckets[i])
		promBuckets[upperBound] = count
	}
	count += uint64(statBuckets[len(statBuckets)-1])
	return prometheus.MustNewConstHistogram(desc, count, sum, promBuckets, labels...)
}
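histogramMetric in Example #3 assumes the bucket slice has one more entry than the cutoffs (the overflow bucket) and relies on Prometheus deriving the implicit +Inf bucket from sample_count. The same conversion on plain slices, independent of vitess's stats package, might look like the sketch below; the function name and arguments are illustrative.

// nonCumulativeToConstHistogram mirrors histogramMetric on plain slices:
// bucketCounts must have len(cutoffs)+1 entries, the last being the overflow
// bucket, which only contributes to sample_count (Prometheus infers the +Inf
// bucket from that total).
func nonCumulativeToConstHistogram(desc *prometheus.Desc, cutoffs []float64, bucketCounts []uint64, sum float64, labels ...string) prometheus.Metric {
	var count uint64
	promBuckets := make(map[float64]uint64, len(cutoffs))
	for i, upperBound := range cutoffs {
		count += bucketCounts[i]
		promBuckets[upperBound] = count
	}
	count += bucketCounts[len(bucketCounts)-1]
	return prometheus.MustNewConstHistogram(desc, count, sum, promBuckets, labels...)
}

For example, cutoffs {0.001, 0.01} with bucketCounts {10, 4, 1} yield cumulative buckets {0.001: 10, 0.01: 14} and a sample_count of 15.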
Example #4
// processQueryResponseTimeTable scans one query_response_time table and emits
// a cumulative histogram of query counts on ch, using the i-th entry of
// infoSchemaQueryResponseTimeCountDescs as its descriptor.
func processQueryResponseTimeTable(db *sql.DB, ch chan<- prometheus.Metric, query string, i int) error {
	queryDistributionRows, err := db.Query(query)
	if err != nil {
		return err
	}
	defer queryDistributionRows.Close()

	var (
		length       string
		count        uint64
		total        string
		histogramCnt uint64
		histogramSum float64
		countBuckets = map[float64]uint64{}
	)

	for queryDistributionRows.Next() {
		err = queryDistributionRows.Scan(
			&length,
			&count,
			&total,
		)
		if err != nil {
			return err
		}

		// The scanned strings are shadowed by parsed floats here; ParseFloat
		// fails on the "TOO LONG" row and yields 0, which the length == 0
		// check below relies on.
		length, _ := strconv.ParseFloat(strings.TrimSpace(length), 64)
		total, _ := strconv.ParseFloat(strings.TrimSpace(total), 64)
		histogramCnt += count
		histogramSum += total
		// Special case for the "TOO LONG" row: only its count field is
		// meaningful, so it is included in the running totals above but not
		// added as an explicit bucket.
		if length == 0 {
			continue
		}
		countBuckets[length] = histogramCnt
	}
	// Create histogram with query counts
	ch <- prometheus.MustNewConstHistogram(
		infoSchemaQueryResponseTimeCountDescs[i], histogramCnt, histogramSum, countBuckets,
	)
	return nil
}
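A caller for processQueryResponseTimeTable is not shown in this excerpt. A plausible driver runs it once per response-time table, keeping the query index i aligned with infoSchemaQueryResponseTimeCountDescs; the function name and query strings below are illustrative stand-ins, not the exporter's actual constants.

// scrapeQueryResponseTimeTables is a hypothetical driver for the function
// above; i must index the matching entry of infoSchemaQueryResponseTimeCountDescs.
func scrapeQueryResponseTimeTables(db *sql.DB, ch chan<- prometheus.Metric) error {
	queries := []string{
		"SELECT TIME, COUNT, TOTAL FROM information_schema.query_response_time",      // illustrative
		"SELECT TIME, COUNT, TOTAL FROM information_schema.query_response_time_read", // illustrative
	}
	for i, q := range queries {
		if err := processQueryResponseTimeTable(db, ch, q, i); err != nil {
			return err
		}
	}
	return nil
}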
Example #5
// makeRecursorRTimeHistogram creates a fixed-value response-time histogram
// from the following stats counters:
// answers0-1, answers1-10, answers10-100, answers100-1000, answers-slow
func makeRecursorRTimeHistogram(statsMap map[string]float64) (prometheus.Metric, error) {
	buckets := make(map[float64]uint64)
	var count uint64
	for k, v := range rTimeBucketMap {
		value, ok := statsMap[k]
		if !ok {
			return nil, fmt.Errorf("required PowerDNS stats key not found: %s", k)
		}
		if v != 0 {
			buckets[v] = uint64(value)
		}
		count += uint64(value)
	}

	// Convert linear buckets to cumulative buckets
	var keys []float64
	for k := range buckets {
		keys = append(keys, k)
	}
	sort.Float64s(keys)
	var cumsum uint64
	for _, k := range keys {
		cumsum = cumsum + buckets[k]
		buckets[k] = cumsum
	}

	desc := prometheus.NewDesc(
		namespace+"_recursor_response_time_seconds",
		"Histogram of PowerDNS recursor response times in seconds.",
		[]string{},
		prometheus.Labels{},
	)

	h := prometheus.MustNewConstHistogram(desc, count, 0, buckets)
	return h, nil
}
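rTimeBucketMap is referenced but not defined in this excerpt. Given the counters listed in the comment, one plausible reconstruction maps each PowerDNS key to its bucket's upper bound in seconds, with answers-slow mapped to 0 so it only contributes to the total count, which is exactly the case the function skips when building buckets. This is an assumption, not the exporter's actual definition.

// Hypothetical reconstruction of rTimeBucketMap: values are upper bounds in
// seconds; answers-slow maps to 0 so it is counted in sample_count only.
var rTimeBucketMap = map[string]float64{
	"answers0-1":      0.001,
	"answers1-10":     0.01,
	"answers10-100":   0.1,
	"answers100-1000": 1,
	"answers-slow":    0,
}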
func TestScrapeQueryResponseTime(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("error opening a stub database connection: %s", err)
	}
	defer db.Close()

	mock.ExpectQuery(queryResponseCheckQuery).WillReturnRows(sqlmock.NewRows([]string{""}).AddRow(1))

	rows := sqlmock.NewRows([]string{"TIME", "COUNT", "TOTAL"}).
		AddRow(0.000001, 124, 0.000000).
		AddRow(0.000010, 179, 0.000797).
		AddRow(0.000100, 2859, 0.107321).
		AddRow(0.001000, 1085, 0.335395).
		AddRow(0.010000, 269, 0.522264).
		AddRow(0.100000, 11, 0.344209).
		AddRow(1.000000, 1, 0.267369).
		AddRow(10.000000, 0, 0.000000).
		AddRow(100.000000, 0, 0.000000).
		AddRow(1000.000000, 0, 0.000000).
		AddRow(10000.000000, 0, 0.000000).
		AddRow(100000.000000, 0, 0.000000).
		AddRow(1000000.000000, 0, 0.000000).
		AddRow("TOO LONG", 0, "TOO LONG")
	mock.ExpectQuery(sanitizeQuery(queryResponseTimeQuery)).WillReturnRows(rows)

	ch := make(chan prometheus.Metric)
	go func() {
		if err = ScrapeQueryResponseTime(db, ch); err != nil {
			t.Errorf("error calling function on test: %s", err)
		}
		close(ch)
	}()

	// Test counters
	expectTimes := []MetricResult{
		{labels: labelMap{"le": "1e-06"}, value: 0},
		{labels: labelMap{"le": "1e-05"}, value: 0.000797},
		{labels: labelMap{"le": "0.0001"}, value: 0.108118},
		{labels: labelMap{"le": "0.001"}, value: 0.443513},
		{labels: labelMap{"le": "0.01"}, value: 0.9657769999999999},
		{labels: labelMap{"le": "0.1"}, value: 1.3099859999999999},
		{labels: labelMap{"le": "1"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "10"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "100"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "1000"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "10000"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "100000"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "1e+06"}, value: 1.5773549999999998},
		{labels: labelMap{"le": "+Inf"}, value: 1.5773549999999998},
	}
	convey.Convey("Metrics comparison", t, func() {
		for _, expect := range expectTimes {
			got := readMetric(<-ch)
			convey.So(expect, convey.ShouldResemble, got)
		}
	})

	// Test histogram
	expectCounts := map[float64]uint64{
		1e-06:  124,
		1e-05:  303,
		0.0001: 3162,
		0.001:  4247,
		0.01:   4516,
		0.1:    4527,
		1:      4528,
		10:     4528,
		100:    4528,
		1000:   4528,
		10000:  4528,
		100000: 4528,
		1e+06:  4528,
	}
	expectHistogram := prometheus.MustNewConstHistogram(infoSchemaQueryResponseTimeCountDesc,
		4528, 1.5773549999999998, expectCounts)
	expectPb := &dto.Metric{}
	expectHistogram.Write(expectPb)

	gotPb := &dto.Metric{}
	gotHistogram := <-ch // read the last item from channel
	gotHistogram.Write(gotPb)
	convey.Convey("Histogram comparison", t, func() {
		convey.So(expectPb.Histogram, convey.ShouldResemble, gotPb.Histogram)
	})

	// Ensure all SQL queries were executed
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Errorf("there were unfulfilled expections: %s", err)
	}
}
// ScrapeQueryResponseTime collects from `information_schema.query_response_time`.
func ScrapeQueryResponseTime(db *sql.DB, ch chan<- prometheus.Metric) error {
	var queryStats uint8
	err := db.QueryRow(queryResponseCheckQuery).Scan(&queryStats)
	if err != nil {
		log.Debugln("Query response time distribution is not present.")
		return nil
	}
	if queryStats == 0 {
		log.Debugln("query_response_time_stats is OFF.")
		return nil
	}

	queryDistributionRows, err := db.Query(queryResponseTimeQuery)
	if err != nil {
		return err
	}
	defer queryDistributionRows.Close()

	var (
		length       string
		count        uint64
		total        string
		histogramCnt uint64
		histogramSum float64
		countBuckets = map[float64]uint64{}
	)

	for queryDistributionRows.Next() {
		err = queryDistributionRows.Scan(
			&length,
			&count,
			&total,
		)
		if err != nil {
			return err
		}

		// The scanned strings are shadowed by parsed floats here; ParseFloat
		// fails on the "TOO LONG" row and yields 0, which the length == 0
		// check below relies on.
		length, _ := strconv.ParseFloat(strings.TrimSpace(length), 64)
		total, _ := strconv.ParseFloat(strings.TrimSpace(total), 64)
		histogramCnt += count
		histogramSum += total
		// Special case for the "TOO LONG" row: only its count field is
		// meaningful, so it is included in the running totals above but is
		// neither added as an explicit bucket nor emitted as a per-time metric.
		if length == 0 {
			continue
		}
		countBuckets[length] = histogramCnt
		// The cumulative totals are exported as counters rather than as a
		// histogram, because histogram bucket values are uint64 counts while
		// these totals are floats.
		ch <- prometheus.MustNewConstMetric(
			infoSchemaQueryResponseTimeTotalDesc, prometheus.CounterValue, histogramSum,
			fmt.Sprintf("%v", length),
		)
	}
	ch <- prometheus.MustNewConstMetric(
		infoSchemaQueryResponseTimeTotalDesc, prometheus.CounterValue, histogramSum,
		"+Inf",
	)
	// Create histogram with query counts
	ch <- prometheus.MustNewConstHistogram(
		infoSchemaQueryResponseTimeCountDesc, histogramCnt, histogramSum, countBuckets,
	)
	return nil
}
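A scraper like ScrapeQueryResponseTime is typically driven from a Collector's Collect method. A hedged sketch of that wiring is below; Exporter is a stand-in type, the sketch assumes the same database/sql, prometheus, and log imports as above, and real exporters often also count scrape errors in a dedicated metric.

// Exporter is a hypothetical stand-in for an exporter type that owns the DB handle.
type Exporter struct {
	db *sql.DB
}

// Collect runs the scraper and logs (rather than propagates) its error, so a
// single failing scraper does not abort the whole scrape.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
	if err := ScrapeQueryResponseTime(e.db, ch); err != nil {
		log.Errorln("Error scraping query response time:", err)
	}
}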