Example #1
// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
//
// If prefix is not unique, New will panic. Call Close to release the
// prefix.
func New(prefix string, ds datastore.Datastore) DatastoreCloser {
	m := &measure{
		backend: ds,

		putNum:     metrics.Counter(prefix + ".Put.num"),
		putErr:     metrics.Counter(prefix + ".Put.err"),
		putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
		putSize:    metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),

		getNum:     metrics.Counter(prefix + ".Get.num"),
		getErr:     metrics.Counter(prefix + ".Get.err"),
		getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
		getSize:    metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),

		hasNum:     metrics.Counter(prefix + ".Has.num"),
		hasErr:     metrics.Counter(prefix + ".Has.err"),
		hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),

		deleteNum:     metrics.Counter(prefix + ".Delete.num"),
		deleteErr:     metrics.Counter(prefix + ".Delete.err"),
		deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),

		queryNum:     metrics.Counter(prefix + ".Query.num"),
		queryErr:     metrics.Counter(prefix + ".Query.err"),
		queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
	}
	return m
}
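A minimal usage sketch for the constructor above; the prefix and the baseDS value are illustrative assumptions, and only New and the documented Close come from the example:

// Hypothetical caller: baseDS is some existing datastore.Datastore.
mds := New("myapp.datastore", baseDS)
defer mds.Close() // releases the "myapp.datastore" prefix, per the doc comment

// Use mds as a drop-in replacement for baseDS; each Put/Get/Has/Delete/Query
// updates the corresponding "myapp.datastore.<Op>" counters and histograms.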
Example #2
func TestHistogramRemove(t *testing.T) {
	metrics.Reset()

	h := metrics.NewHistogram("heyo", 1, 1000, 3)
	h.Remove()

	_, gauges := metrics.Snapshot()
	if v, ok := gauges["heyo.P50"]; ok {
		t.Errorf("Gauge was %v, but expected nothing", v)
	}
}
Example #3
func RecordDeliveryTime(name string, t time.Duration) {
	hm.Lock()
	h, ok := deliveryTimerHists[name]
	if !ok {
		// Lazily register a histogram tracking 0-300,000 ms (five minutes)
		// with four significant figures.
		h = metrics.NewHistogram(epMetricName(name, "processing-time"), 0, 300*1000, 4)
		deliveryTimerHists[name] = h
	}
	// Capture the histogram while the lock is held so concurrent callers
	// don't race on the map read.
	hm.Unlock()

	// Record the delivery time in milliseconds.
	h.RecordValue(t.Nanoseconds() / 1000000)
}
Example #4
func BenchmarkHistogramRecordValue(b *testing.B) {
	metrics.Reset()
	h := metrics.NewHistogram("hist", 1, 1000, 3)

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			h.RecordValue(100)
		}
	})
}
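The benchmark above runs with the standard Go tooling; nothing project-specific is assumed beyond the package that contains it:

	go test -bench=BenchmarkHistogramRecordValue -benchmem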
Example #5
func (s *TestSuite) TestHistogram(c *C) {

	expected := map[string]int64{
		"foo.P50":  5,
		"foo.P75":  5,
		"foo.P90":  15,
		"foo.P95":  15,
		"foo.P99":  15,
		"foo.P999": 15,
	}

	h := metrics.NewHistogram("foo", 0, 100, 4)

	h.RecordValue(1)
	h.RecordValue(5)
	h.RecordValue(5)
	h.RecordValue(15)

	_, g := metrics.Snapshot()
	log.Printf("g: %+v", g)

	c.Assert(g, DeepEquals, expected)
}
Example #6
func TestHistogram(t *testing.T) {
	metrics.Reset()

	h := metrics.NewHistogram("heyo", 1, 1000, 3)
	for i := 100; i > 0; i-- {
		for j := 0; j < i; j++ {
			h.RecordValue(int64(i))
		}
	}

	_, gauges := metrics.Snapshot()

	if v, want := gauges["heyo.P50"], int64(71); v != want {
		t.Errorf("P50 was %v, but expected %v", v, want)
	}

	if v, want := gauges["heyo.P75"], int64(87); v != want {
		t.Errorf("P75 was %v, but expected %v", v, want)
	}

	if v, want := gauges["heyo.P90"], int64(95); v != want {
		t.Errorf("P90 was %v, but expected %v", v, want)
	}

	if v, want := gauges["heyo.P95"], int64(98); v != want {
		t.Errorf("P95 was %v, but expected %v", v, want)
	}

	if v, want := gauges["heyo.P99"], int64(100); v != want {
		t.Errorf("P99 was %v, but expected %v", v, want)
	}

	if v, want := gauges["heyo.P999"], int64(100); v != want {
		t.Errorf("P999 was %v, but expected %v", v, want)
	}
}
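As a sanity check on the expected gauges (this positional argument is an assumption about how the percentiles work out, not part of the test): the loops record value i exactly i times for i = 1..100, 5050 samples in total, and value k occupies sorted positions k(k-1)/2+1 through k(k+1)/2. The 50th percentile therefore lands on 71 (positions 2486-2556 cover sample 2525), and the same argument yields 87, 95, 98, and 100 for P75, P90, P95, and P99/P999.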
Example #7
//
//     HTTP.Requests
//     HTTP.Responses
//     HTTP.Latency.{P50,P75,P90,P95,P99,P999}
//
// By tracking incoming requests and outgoing responses, one can monitor not
// only the requests per second, but also the number of requests being processed
// at any given point in time.
func Wrap(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		requests.Add()                  // inc requests
		defer responses.Add()           // inc responses when we're done
		defer recordLatency(time.Now()) // record latency when we're done

		h.ServeHTTP(w, r)
	})
}

var (
	requests  = metrics.Counter("HTTP.Requests")
	responses = metrics.Counter("HTTP.Responses")

	// a five-minute window tracking 1ms-3min
	latency = metrics.NewHistogram("HTTP.Latency", 1, 1000*60*3, 3)
)

func recordLatency(start time.Time) {
	elapsedMS := time.Since(start).Seconds() * 1000.0
	_ = latency.RecordValue(int64(elapsedMS))
}
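A possible way to wire the wrapper into a server; the mux, route, and address below are made-up illustrations, while Wrap and metrics.Snapshot come from this page:

// Hypothetical wiring of the instrumented handler.
mux := http.NewServeMux()
mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("hello"))
})

// Every request served through the wrapped handler bumps HTTP.Requests and
// HTTP.Responses and feeds HTTP.Latency; read the current values back with
// counters, gauges := metrics.Snapshot().
log.Fatal(http.ListenAndServe(":8080", Wrap(mux)))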
Example #8
func NewHist(name string) *metrics.Histogram {
	if name != "" && archaius.Conf.Collect {
		return metrics.NewHistogram(name, 1000, 100000000, 5)
	}
	return nil
}
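Because NewHist returns nil when collection is disabled or the name is empty, callers presumably guard against nil before recording; a hypothetical caller might look like this (the metric name and value are assumptions):

h := NewHist("service.request-latency")
if h != nil {
	h.RecordValue(42) // only record when collection is enabled
}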