Example #1
func TestCounterBatchFunc(t *testing.T) {
	metrics.Reset()

	var a, b uint64

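	// Both counters are registered under the same batch key ("yay"), so the
	// shared init function below sets a and b together once per snapshot,
	// before either value is read.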
	metrics.Counter("whee").SetBatchFunc(
		"yay",
		func() {
			a, b = 1, 2
		},
		func() uint64 {
			return a
		},
	)

	metrics.Counter("woo").SetBatchFunc(
		"yay",
		func() {
			a, b = 1, 2
		},
		func() uint64 {
			return b
		},
	)

	counters, _ := metrics.Snapshot()
	if v, want := counters["whee"], uint64(1); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}

	if v, want := counters["woo"], uint64(2); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}
}
Example #2
// New wraps the datastore, providing metrics on the operations. The
// metrics are registered with names starting with prefix and a dot.
//
// If prefix is not unique, New will panic. Call Close to release the
// prefix.
func New(prefix string, ds datastore.Datastore) *measure {
	m := &measure{
		backend: ds,

		putNum:     metrics.Counter(prefix + ".Put.num"),
		putErr:     metrics.Counter(prefix + ".Put.err"),
		putLatency: metrics.NewHistogram(prefix+".Put.latency", 0, maxLatency, 3),
		putSize:    metrics.NewHistogram(prefix+".Put.size", 0, maxSize, 3),

		getNum:     metrics.Counter(prefix + ".Get.num"),
		getErr:     metrics.Counter(prefix + ".Get.err"),
		getLatency: metrics.NewHistogram(prefix+".Get.latency", 0, maxLatency, 3),
		getSize:    metrics.NewHistogram(prefix+".Get.size", 0, maxSize, 3),

		hasNum:     metrics.Counter(prefix + ".Has.num"),
		hasErr:     metrics.Counter(prefix + ".Has.err"),
		hasLatency: metrics.NewHistogram(prefix+".Has.latency", 0, maxLatency, 3),

		deleteNum:     metrics.Counter(prefix + ".Delete.num"),
		deleteErr:     metrics.Counter(prefix + ".Delete.err"),
		deleteLatency: metrics.NewHistogram(prefix+".Delete.latency", 0, maxLatency, 3),

		queryNum:     metrics.Counter(prefix + ".Query.num"),
		queryErr:     metrics.Counter(prefix + ".Query.err"),
		queryLatency: metrics.NewHistogram(prefix+".Query.latency", 0, maxLatency, 3),
	}
	return m
}
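A minimal usage sketch, not part of the original example: it assumes this constructor lives in a package imported as measure (e.g. github.com/ipfs/go-ds-measure) and uses go-datastore's in-memory MapDatastore.
package main

import (
	datastore "github.com/ipfs/go-datastore"
	measure "github.com/ipfs/go-ds-measure"
)

func main() {
	// Wrap an in-memory datastore; all metric names will start with "example.ds.".
	ds := measure.New("example.ds", datastore.NewMapDatastore())

	// ds can be used like any other datastore; Put, Get, Has, Delete and Query
	// calls are now counted and their latencies recorded in histograms.

	// Close releases the "example.ds" prefix so it can be registered again.
	ds.Close()
}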
Example #3
func init() {
	msg := &memStatGauges{}

	metrics.Counter("Mem.NumGC").SetBatchFunc(key{}, msg.init, msg.numGC)
	metrics.Counter("Mem.PauseTotalNs").SetBatchFunc(key{}, msg.init, msg.totalPause)

	metrics.Gauge("Mem.LastGC").SetBatchFunc(key{}, msg.init, msg.lastPause)
	metrics.Gauge("Mem.Alloc").SetBatchFunc(key{}, msg.init, msg.alloc)
	metrics.Gauge("Mem.HeapObjects").SetBatchFunc(key{}, msg.init, msg.objects)
}
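The memStatGauges type and the key batch key are not shown in this example. A plausible sketch, assuming key is an empty struct used only to group the batch, that the type caches a single runtime.MemStats reading per batch, and that counter callbacks return uint64 while gauge callbacks return int64:
package runtimemetrics

import "runtime"

// key is the shared batch key used in the init function above, so the
// stats are read only once per snapshot (assumed to be an empty struct).
type key struct{}

// memStatGauges caches one runtime.MemStats reading for the whole batch.
type memStatGauges struct {
	stats runtime.MemStats
}

// init refreshes the cached stats; it runs once before the batch's
// counter and gauge callbacks are read.
func (m *memStatGauges) init() {
	runtime.ReadMemStats(&m.stats)
}

// Counter callbacks report monotonically increasing uint64 values.
func (m *memStatGauges) numGC() uint64      { return uint64(m.stats.NumGC) }
func (m *memStatGauges) totalPause() uint64 { return m.stats.PauseTotalNs }

// Gauge callbacks report point-in-time int64 values.
func (m *memStatGauges) lastPause() int64 { return int64(m.stats.LastGC) }
func (m *memStatGauges) alloc() int64     { return int64(m.stats.Alloc) }
func (m *memStatGauges) objects() int64   { return int64(m.stats.HeapObjects) }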
Example #4
func TestCounter(t *testing.T) {
	metrics.Reset()

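	// Add increments by one and AddN by the given amount, so the total is 11.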
	metrics.Counter("whee").Add()
	metrics.Counter("whee").AddN(10)

	counters, _ := metrics.Snapshot()
	if v, want := counters["whee"], uint64(11); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}
}
Example #5
func TestCounterRemove(t *testing.T) {
	metrics.Reset()

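	// Remove deletes the counter entirely, so it should not appear in the
	// snapshot at all.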
	metrics.Counter("whee").Add()
	metrics.Counter("whee").Remove()

	counters, _ := metrics.Snapshot()
	if v, ok := counters["whee"]; ok {
		t.Errorf("Counter was %v, but expected nothing", v)
	}
}
Example #6
func TestCounterFunc(t *testing.T) {
	metrics.Reset()

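	// SetFunc makes the counter report whatever the function returns at
	// snapshot time instead of an internally stored value.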
	metrics.Counter("whee").SetFunc(func() uint64 {
		return 100
	})

	counters, _ := metrics.Snapshot()
	if v, want := counters["whee"], uint64(100); v != want {
		t.Errorf("Counter was %v, but expected %v", v, want)
	}
}
Example #7
func BenchmarkCounterAddN(b *testing.B) {
	metrics.Reset()

	b.ReportAllocs()
	b.ResetTimer()

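	// Increment a single counter from all benchmark goroutines to measure the
	// per-operation cost and allocations of AddN under parallel load.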
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			metrics.Counter("test2").AddN(100)
		}
	})
}