// Register GC and heap metrics that share a single batch initializer.
func init() {
	msg := &memStatGauges{}

	metrics.Counter("Mem.NumGC").SetBatchFunc(key{}, msg.init, msg.numGC)
	metrics.Counter("Mem.PauseTotalNs").SetBatchFunc(key{}, msg.init, msg.totalPause)
	metrics.Gauge("Mem.LastGC").SetBatchFunc(key{}, msg.init, msg.lastPause)
	metrics.Gauge("Mem.Alloc").SetBatchFunc(key{}, msg.init, msg.alloc)
	metrics.Gauge("Mem.HeapObjects").SetBatchFunc(key{}, msg.init, msg.objects)
}
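// A minimal sketch of what memStatGauges and key could look like; neither is
// defined in the snippet above, and the callback shapes (an init func plus
// value funcs returning uint64 for counters and int64 for gauges) are
// assumptions about the metrics package, not confirmed by it. The idea is
// that the shared init runs once per snapshot, so all five metrics are read
// from the same runtime.MemStats.
type key struct{} // hypothetical unexported batch key

type memStatGauges struct {
	stats runtime.MemStats
}

// init refreshes the cached MemStats before the value funcs are called.
func (msg *memStatGauges) init() {
	runtime.ReadMemStats(&msg.stats)
}

func (msg *memStatGauges) numGC() uint64      { return uint64(msg.stats.NumGC) }
func (msg *memStatGauges) totalPause() uint64 { return msg.stats.PauseTotalNs }
func (msg *memStatGauges) lastPause() int64   { return int64(msg.stats.LastGC) }
func (msg *memStatGauges) alloc() int64       { return int64(msg.stats.Alloc) }
func (msg *memStatGauges) objects() int64     { return int64(msg.stats.HeapObjects) }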
func TestGaugeRemove(t *testing.T) {
	metrics.Reset()

	metrics.Gauge("whee").Set(1)
	metrics.Gauge("whee").Remove()

	_, gauges := metrics.Snapshot()
	if v, ok := gauges["whee"]; ok {
		t.Errorf("Gauge was %v, but expected nothing", v)
	}
}
func init() { metrics.Gauge("FileDescriptors.Max").SetFunc(func() int64 { v, err := getFDLimit() if err != nil { return 0 } return int64(v) }) metrics.Gauge("FileDescriptors.Used").SetFunc(func() int64 { v, err := getFDUsage() if err != nil { return 0 } return int64(v) }) }
func TestGaugeValue(t *testing.T) {
	metrics.Reset()

	metrics.Gauge("whee").Set(-100)

	_, gauges := metrics.Snapshot()
	if v, want := gauges["whee"], int64(-100); v != want {
		t.Errorf("Gauge was %v, but expected %v", v, want)
	}
}
func BenchmarkGaugeSet(b *testing.B) {
	metrics.Reset()

	b.ReportAllocs()
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			metrics.Gauge("test2").Set(100)
		}
	})
}
// Gauge handles an HTTP request that reports a gauge value by name.
func (r *ReportResource) Gauge(res http.ResponseWriter, req *http.Request) {
	var g api.Gauge
	if err := rest.Bind(req, &g); err != nil {
		log.Print(err)
		rest.SetBadRequestResponse(res)
		return
	}

	metrics.Gauge(g.Name).Set(g.N)

	if err := rest.SetOKResponse(res, g); err != nil {
		rest.SetInternalServerErrorResponse(res, err)
		return
	}
}
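// The api.Gauge payload is not defined in the snippet above; judging from its
// use (g.Name and g.N, with N passed to Set), it is presumably a small
// request-bound struct along these lines. The field tags are an assumption.
type Gauge struct {
	Name string `json:"name"`
	N    int64  `json:"n"`
}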
func init() { metrics.Gauge("Goroutines.Num").SetFunc(func() int64 { return int64(runtime.NumGoroutine()) }) }